repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
iki-taichi/tf-keras-transformer | [
"613122705583c0274b0c9be0993f3bbeb240932d"
]
| [
"src/get_kyoto_corpus.py"
]
| [
"# coding: utf-8\n\n\n\"\"\"\nDownload Japanese-English Bilingual Corpus of Wikipedia's Kyoto Articles\nAnd make it csv file for transformer fitting\n\nYou can read documents about the license of the corpus at the following url: \nhttps://alaginrc.nict.go.jp/WikiCorpus/index_E.html#license \n\"\"\"\n\n\nimport argparse\n\nimport os\nfrom urllib.request import urlretrieve\nimport tarfile\n\nimport csv\nimport glob\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\n\n\nDOWNLOAD_URL='https://alaginrc.nict.go.jp/WikiCorpus/cgi-bin/dl1.cgi'\nTARGET_FILE_NAME = 'wiki_corpus_2.01.tar.gz'\n\n\ndef download_data(work_dir):\n if not os.path.exists(work_dir):\n os.makedirs(work_dir)\n \n file_path = os.path.join(work_dir, TARGET_FILE_NAME)\n urlretrieve(DOWNLOAD_URL, file_path)\n\n tar = tarfile.open(file_path, 'r:gz') \n tar.extractall(work_dir) \n tar.close()\n\n\ndef by_sentence(input_path, output_path, valid_ratio):\n target_path = os.path.join(input_path, '*/*.xml')\n xmls = glob.glob(target_path, recursive=True)\n print('to convert %d xml files...'%(len(xmls)))\n \n pairs = []\n\n for xml in xmls:\n try:\n root = ET.parse(xml).getroot()\n except Exception as e:\n print('%s skipped because of %s'%(xml, e))\n continue\n \n for sen in root.findall('.//sen'):\n ja = sen.find('j')\n en = sen.find('e[@type=\\'check\\']')\n\n if ja is not None and en is not None:\n pairs.append([en.text, ja.text])\n \n if len(pairs) < 5:\n print('data sample:(%s)%s'%(xml, pairs[-1]))\n \n created_files = []\n \n if valid_ratio == 0:\n with open(output_path, 'w') as f:\n csv.writer(f).writerows(pairs)\n created_files.append(output_path)\n else:\n output_path_prefix, ext = os.path.splitext(output_path)\n output_path_valid = output_path_prefix + '_valid' + ext\n valid_len = int(len(pairs)*valid_ratio)\n \n np.random.shuffle(pairs)\n \n with open(output_path_valid, 'w') as f:\n csv.writer(f).writerows(pairs[:valid_len])\n created_files.append(output_path_valid)\n \n with open(output_path, 'w') as f:\n csv.writer(f).writerows(pairs[valid_len:])\n created_files.append(output_path)\n \n return created_files\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Make csv files.')\n parser.add_argument(\n '-w', '--work_dir', type=str, default='data/kyoto_corpus',\n help='path to working dir where downloaded data is expanded'\n )\n parser.add_argument(\n '-o', '--output_path', type=str, default='data/kyoto_en_ja.csv',\n help='path to output file'\n )\n parser.add_argument(\n '-v', '--valid_ratio', type=float, default=0.1,\n help='ratio of rows for validation'\n )\n \n args = parser.parse_args()\n \n print('start downloading...')\n download_data(args.work_dir)\n print('downloaded')\n \n print('start editing files...')\n created_files = by_sentence(args.work_dir, args.output_path, args.valid_ratio)\n print('%s were created'%(','.join(created_files)))\n "
]
| [
[
"numpy.random.shuffle"
]
]
|
google-research/remixmatch | [
"f7061ebf055227cbeb5c6fced1ce054e0ceecfcd"
]
| [
"scripts/create_datasets.py"
]
| [
"#!/usr/bin/env python\n\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to download all datasets and create .tfrecord files.\n\"\"\"\n\nimport collections\nimport gzip\nimport os\nimport tarfile\nimport tempfile\nfrom urllib import request\n\nimport numpy as np\nimport scipy.io\nimport tensorflow as tf\nfrom absl import app\nfrom tqdm import trange\n\nfrom libml import data as libml_data\nfrom libml.utils import EasyDict\n\nURLS = {\n 'svhn': 'http://ufldl.stanford.edu/housenumbers/{}_32x32.mat',\n 'cifar10': 'https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz',\n 'cifar100': 'https://www.cs.toronto.edu/~kriz/cifar-100-matlab.tar.gz',\n 'stl10': 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz',\n}\n\n\ndef _encode_png(images):\n raw = []\n with tf.Session() as sess, tf.device('cpu:0'):\n image_x = tf.placeholder(tf.uint8, [None, None, None], 'image_x')\n to_png = tf.image.encode_png(image_x)\n for x in trange(images.shape[0], desc='PNG Encoding', leave=False):\n raw.append(sess.run(to_png, feed_dict={image_x: images[x]}))\n return raw\n\n\ndef _load_svhn():\n splits = collections.OrderedDict()\n for split in ['train', 'test', 'extra']:\n with tempfile.NamedTemporaryFile() as f:\n request.urlretrieve(URLS['svhn'].format(split), f.name)\n data_dict = scipy.io.loadmat(f.name)\n dataset = {}\n dataset['images'] = np.transpose(data_dict['X'], [3, 0, 1, 2])\n dataset['images'] = _encode_png(dataset['images'])\n dataset['labels'] = data_dict['y'].reshape((-1))\n # SVHN raw data uses labels from 1 to 10; use 0 to 9 instead.\n dataset['labels'] -= 1\n splits[split] = dataset\n return splits\n\n\ndef _load_stl10():\n def unflatten(images):\n return np.transpose(images.reshape((-1, 3, 96, 96)),\n [0, 3, 2, 1])\n\n with tempfile.NamedTemporaryFile() as f:\n if tf.gfile.Exists('stl10/stl10_binary.tar.gz'):\n f = tf.gfile.Open('stl10/stl10_binary.tar.gz', 'rb')\n else:\n request.urlretrieve(URLS['stl10'], f.name)\n tar = tarfile.open(fileobj=f)\n train_X = tar.extractfile('stl10_binary/train_X.bin')\n train_y = tar.extractfile('stl10_binary/train_y.bin')\n\n test_X = tar.extractfile('stl10_binary/test_X.bin')\n test_y = tar.extractfile('stl10_binary/test_y.bin')\n\n unlabeled_X = tar.extractfile('stl10_binary/unlabeled_X.bin')\n\n train_set = {'images': np.frombuffer(train_X.read(), dtype=np.uint8),\n 'labels': np.frombuffer(train_y.read(), dtype=np.uint8) - 1}\n\n test_set = {'images': np.frombuffer(test_X.read(), dtype=np.uint8),\n 'labels': np.frombuffer(test_y.read(), dtype=np.uint8) - 1}\n\n _imgs = np.frombuffer(unlabeled_X.read(), dtype=np.uint8)\n unlabeled_set = {'images': _imgs,\n 'labels': np.zeros(100000, dtype=np.uint8)}\n\n fold_indices = tar.extractfile('stl10_binary/fold_indices.txt').read()\n\n train_set['images'] = _encode_png(unflatten(train_set['images']))\n test_set['images'] = _encode_png(unflatten(test_set['images']))\n unlabeled_set['images'] = _encode_png(unflatten(unlabeled_set['images']))\n return 
dict(train=train_set, test=test_set, unlabeled=unlabeled_set,\n files=[EasyDict(filename=\"stl10_fold_indices.txt\", data=fold_indices)])\n\n\ndef _load_cifar10():\n def unflatten(images):\n return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),\n [0, 2, 3, 1])\n\n with tempfile.NamedTemporaryFile() as f:\n request.urlretrieve(URLS['cifar10'], f.name)\n tar = tarfile.open(fileobj=f)\n train_data_batches, train_data_labels = [], []\n for batch in range(1, 6):\n data_dict = scipy.io.loadmat(tar.extractfile(\n 'cifar-10-batches-mat/data_batch_{}.mat'.format(batch)))\n train_data_batches.append(data_dict['data'])\n train_data_labels.append(data_dict['labels'].flatten())\n train_set = {'images': np.concatenate(train_data_batches, axis=0),\n 'labels': np.concatenate(train_data_labels, axis=0)}\n data_dict = scipy.io.loadmat(tar.extractfile(\n 'cifar-10-batches-mat/test_batch.mat'))\n test_set = {'images': data_dict['data'],\n 'labels': data_dict['labels'].flatten()}\n train_set['images'] = _encode_png(unflatten(train_set['images']))\n test_set['images'] = _encode_png(unflatten(test_set['images']))\n return dict(train=train_set, test=test_set)\n\n\ndef _load_cifar100():\n def unflatten(images):\n return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),\n [0, 2, 3, 1])\n\n with tempfile.NamedTemporaryFile() as f:\n request.urlretrieve(URLS['cifar100'], f.name)\n tar = tarfile.open(fileobj=f)\n data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/train.mat'))\n train_set = {'images': data_dict['data'],\n 'labels': data_dict['fine_labels'].flatten()}\n data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/test.mat'))\n test_set = {'images': data_dict['data'],\n 'labels': data_dict['fine_labels'].flatten()}\n train_set['images'] = _encode_png(unflatten(train_set['images']))\n test_set['images'] = _encode_png(unflatten(test_set['images']))\n return dict(train=train_set, test=test_set)\n\n\ndef _load_fashionmnist():\n def _read32(data):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(data.read(4), dtype=dt)[0]\n\n image_filename = '{}-images-idx3-ubyte'\n label_filename = '{}-labels-idx1-ubyte'\n split_files = [('train', 'train'), ('test', 't10k')]\n splits = {}\n for split, split_file in split_files:\n with tempfile.NamedTemporaryFile() as f:\n request.urlretrieve(URLS['fashion_mnist'].format(image_filename.format(split_file)), f.name)\n with gzip.GzipFile(fileobj=f, mode='r') as data:\n assert _read32(data) == 2051\n n_images = _read32(data)\n row = _read32(data)\n col = _read32(data)\n images = np.frombuffer(data.read(n_images * row * col), dtype=np.uint8)\n images = images.reshape((n_images, row, col, 1))\n with tempfile.NamedTemporaryFile() as f:\n request.urlretrieve(URLS['fashion_mnist'].format(label_filename.format(split_file)), f.name)\n with gzip.GzipFile(fileobj=f, mode='r') as data:\n assert _read32(data) == 2049\n n_labels = _read32(data)\n labels = np.frombuffer(data.read(n_labels), dtype=np.uint8)\n splits[split] = {'images': _encode_png(images), 'labels': labels}\n return splits\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _save_as_tfrecord(data, filename):\n assert len(data['images']) == len(data['labels'])\n filename = os.path.join(libml_data.DATA_DIR, filename + '.tfrecord')\n print('Saving dataset:', filename)\n with tf.python_io.TFRecordWriter(filename) 
as writer:\n for x in trange(len(data['images']), desc='Building records'):\n feat = dict(image=_bytes_feature(data['images'][x]),\n label=_int64_feature(data['labels'][x]))\n record = tf.train.Example(features=tf.train.Features(feature=feat))\n writer.write(record.SerializeToString())\n print('Saved:', filename)\n\n\ndef _is_installed(name, checksums):\n for subset, checksum in checksums.items():\n filename = os.path.join(libml_data.DATA_DIR, '%s-%s.tfrecord' % (name, subset))\n if not tf.gfile.Exists(filename):\n return False\n return True\n\n\ndef _save_files(files, *args, **kwargs):\n del args, kwargs\n for folder in frozenset(os.path.dirname(x) for x in files):\n tf.gfile.MakeDirs(os.path.join(libml_data.DATA_DIR, folder))\n for filename, contents in files.items():\n with tf.gfile.Open(os.path.join(libml_data.DATA_DIR, filename), 'w') as f:\n f.write(contents)\n\n\ndef _is_installed_folder(name, folder):\n return tf.gfile.Exists(os.path.join(libml_data.DATA_DIR, name, folder))\n\n\nCONFIGS = dict(\n cifar10=dict(loader=_load_cifar10, checksums=dict(train=None, test=None)),\n cifar100=dict(loader=_load_cifar100, checksums=dict(train=None, test=None)),\n svhn=dict(loader=_load_svhn, checksums=dict(train=None, test=None, extra=None)),\n stl10=dict(loader=_load_stl10, checksums=dict(train=None, test=None)),\n # fashion_mnist=dict(loader=_load_fashionmnist, checksums=dict(train=None, test=None)),\n)\n\n\ndef main(argv):\n if len(argv[1:]):\n subset = set(argv[1:])\n else:\n subset = set(CONFIGS.keys())\n tf.gfile.MakeDirs(libml_data.DATA_DIR)\n for name, config in CONFIGS.items():\n if name not in subset:\n continue\n if 'is_installed' in config:\n if config['is_installed']():\n print('Skipping already installed:', name)\n continue\n elif _is_installed(name, config['checksums']):\n print('Skipping already installed:', name)\n continue\n print('Preparing', name)\n datas = config['loader']()\n saver = config.get('saver', _save_as_tfrecord)\n for sub_name, data in datas.items():\n if sub_name == 'readme':\n filename = os.path.join(libml_data.DATA_DIR, '%s-%s.txt' % (name, sub_name))\n with tf.gfile.Open(filename, 'w') as f:\n f.write(data)\n elif sub_name == 'files':\n for file_and_data in data:\n path = os.path.join(libml_data.DATA_DIR, file_and_data.filename)\n with tf.gfile.Open(path, \"wb\") as f:\n f.write(file_and_data.data)\n else:\n saver(data, '%s-%s' % (name, sub_name))\n\n\nif __name__ == '__main__':\n app.run(main)\n"
]
| [
[
"numpy.concatenate",
"tensorflow.train.BytesList",
"tensorflow.train.Int64List",
"tensorflow.train.Features",
"numpy.zeros",
"tensorflow.gfile.Open",
"tensorflow.Session",
"tensorflow.gfile.Exists",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.gfile.MakeDirs",
"tensorflow.placeholder",
"numpy.transpose",
"tensorflow.device",
"tensorflow.image.encode_png",
"numpy.dtype"
]
]
|
ranarango/fuegos-orinoquia | [
"d82941ef0c90fe66162c8678b6f4a4c010d4313b"
]
| [
"src/06_fuel/10_plot_rainfall_correlations.py"
]
| [
"\"\"\"\n\n\"\"\"\nimport os\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport pandas as pd\nimport seaborn as sns\n\nfrom src.utils.constants import REGIONS\n\n\nif __name__ == \"__main__\":\n\n # Project's root\n os.chdir(\"../..\")\n\n fig = plt.figure(figsize=(11.69, 8.27))\n outer = gridspec.GridSpec(2, 2, wspace=0.2, hspace=0.35)\n\n fn = \"results/xlsx/burned_area_rainfall_corr.xlsx\"\n spatial_corr = pd.read_excel(fn, sheet_name=\"Spatial\")\n temporal_corr = pd.read_excel(fn, sheet_name=\"Temporal\")\n\n for i, region in enumerate(REGIONS):\n\n inner = gridspec.GridSpecFromSubplotSpec(\n 2, 1, subplot_spec=outer[i], wspace=0.1, hspace=0.45\n )\n\n axt = plt.Subplot(fig, inner[0])\n axb = plt.Subplot(fig, inner[1])\n\n region_name = region.get(\"name\")\n fn = f\"results/xlsx/{region_name}/burned_area_rainfall_values.xlsx\"\n spatial_values = pd.read_excel(fn, sheet_name=\"Spatial\")\n temporal_values = pd.read_excel(fn, sheet_name=\"Temporal\")\n\n sns.regplot(\n x=\"burned_area\",\n y=\"rainfall\",\n data=spatial_values,\n ax=axt,\n color=\"#263238\",\n line_kws=dict(linewidth=1.5),\n scatter_kws=dict(s=3, alpha=0.5),\n )\n n = spatial_corr.loc[spatial_corr[\"region\"] == region_name, \"n\"].iloc[0]\n axt.text(0.8, 0.85, f\"$n = {n}$\", transform=axt.transAxes, fontsize=7)\n r = spatial_corr.loc[spatial_corr[\"region\"] == region_name, \"r\"].iloc[0]\n p = spatial_corr.loc[spatial_corr[\"region\"] == region_name, \"p_value\"].iloc[0]\n if p < 0.05:\n text = f\"$r = {r:.2f}^*$\"\n else:\n text = f\"$r = {r:.2f}$\"\n axt.text(0.8, 0.75, text, transform=axt.transAxes, fontsize=7)\n axt.set_title(region[\"name\"].upper(), fontsize=8)\n axt.set_xlabel(\"Burned area (ha)\", fontsize=8)\n axt.set_ylabel(\"Precipitation (mm)\", fontsize=8)\n axt.tick_params(labelsize=8)\n fig.add_subplot(axt)\n\n sns.regplot(\n x=\"area\",\n y=\"rainfall\",\n data=temporal_values,\n ax=axb,\n color=\"#263238\",\n line_kws=dict(linewidth=1.5),\n scatter_kws=dict(s=3, alpha=0.5),\n )\n n = temporal_corr.loc[temporal_corr[\"region\"] == region_name, \"n\"].iloc[0]\n axb.text(0.8, 0.85, f\"$n = {n}$\", transform=axb.transAxes, fontsize=7)\n r = temporal_corr.loc[temporal_corr[\"region\"] == region_name, \"r\"].iloc[0]\n p = temporal_corr.loc[temporal_corr[\"region\"] == region_name, \"p_value\"].iloc[0]\n if p < 0.05:\n text = f\"$r = {r:.2f}^*$\"\n else:\n text = f\"$r = {r:.2f}$\"\n axb.text(0.8, 0.75, text, transform=axb.transAxes, fontsize=7)\n axb.set_xlabel(\"Burned area (ha)\", fontsize=8)\n axb.set_ylabel(\"Precipitation (mm)\", fontsize=8)\n axb.xaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0e\"))\n axb.tick_params(labelsize=8)\n fig.add_subplot(axb)\n\n output_folder = \"figures\"\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n save_to = os.path.join(output_folder, \"burned_area_rainfall_corr.pdf\")\n fig.savefig(save_to, bbox_inches=\"tight\")\n"
]
| [
[
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"pandas.read_excel",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.Subplot",
"matplotlib.gridspec.GridSpec"
]
]
|
fffasttime/coiltraine | [
"d08e8c5c02219998e2952abff4f50b93969e3736"
]
| [
"errorinsert/EI_modules.py"
]
| [
"# Copy from reliability\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport random\nimport numpy as np\n\n\n__all__ = ['Conv2dEI', 'LinearEI']\n\n\ndef get_default_kwargs(kwargs):\n    default = {\n        'tmr': False,\n        'ei_prob': 1e-3,\n        'nbits': 8,\n\n        'nw': -1,\n        'ei_bits': False,\n\n        'wg_flag': False,\n        'bound1': 0,\n        'bound2': 0,\n        'wg_sort': False,\n\n        'channel_flag': False,\n        'tmr_array': 0\n    }\n    for k, v in default.items():\n        if k not in kwargs:\n            kwargs[k] = v\n    return kwargs\n\n\ndef EI(x, ei_prob, inplace=False):\n    if inplace:\n        x_view = x.view(-1)\n    else:\n        x_view = x.clone().view(-1)\n    # 1. generate random code\n    total_bits = x.numel() * 8\n    error_bits = int(total_bits * ei_prob)\n    ei_position = random.sample(range(total_bits), error_bits)\n\n    for ei_p in ei_position:\n        idx_value = int(ei_p / 8)\n        idx_bit = ei_p % 8\n        x_view[idx_value] = reverse_bit(x_view[idx_value], idx_bit)\n    return x_view.view(x.shape)\n\n# Error-injection scheme for a channel-wise TMR configuration; channels that receive no injection are the protected ones\ndef EI_channel(x, ei_prob, tmr_array, channel_num, inplace=False):\n    #print('tmr_array:{}'.format(tmr_array))\n    if inplace:\n        x_view = x.view(-1)\n    else:\n        x_view = x.clone().view(-1)\n    total_bits = x.numel() * 8\n    part = int(total_bits / channel_num)\n    ei_bits = []\n    # concatenate the bit indices of the channels selected for error injection\n    for i in range(channel_num):\n        if tmr_array[i] == 1:\n            continue\n        for b in range(i*part, (i+1)*part):\n            ei_bits.append(b)\n    error_bits = int(len(ei_bits) * ei_prob)\n    ei_position = random.sample(ei_bits, error_bits)\n\n    for ei_p in ei_position:\n        idx_value = int(ei_p / 8)\n        idx_bit = ei_p % 8\n        x_view[idx_value] = reverse_bit(x_view[idx_value], idx_bit)\n    return x_view.view(x.shape)\n\n\ndef EI_w(x, ei_prob, bound1, bound2, sort=False, inplace=False):\n    if inplace:\n        x_view = x.view(-1)\n    else:\n        x_view = x.clone().view(-1)\n    _, indices = torch.sort(x_view) # sort in ascending order\n    total_bits = int(bound2 - bound1) * 8\n    error_bits = int(total_bits * ei_prob)\n    ei_position = random.sample(range(total_bits), error_bits)\n\n    for ei_p in ei_position:\n        idx_value = int(ei_p / 8) + bound1\n        if sort:\n            idx_value = indices[idx_value]\n        else:\n            idx_value = int(ei_p / 8) + bound1\n        idx_bit = ei_p % 8\n        x_view[idx_value] = reverse_bit(x_view[idx_value], idx_bit)\n    return x_view.view(x.shape)\n\ndef EI_bit(x, num, inplace=False):\n    if inplace:\n        x_view = x.view(-1)\n    else:\n        x_view = x.clone().view(-1)\n    idx_value = int(num/8)\n    idx_bit = num % 8\n    x_view[idx_value] = reverse_bit(x_view[idx_value], idx_bit)\n    return x_view.view(x.shape)\n\ndef reverse_bit(value, bit_position):\n    bitmask = 2 ** bit_position\n    if bit_position == 7:\n        bitmask = - 2 ** bit_position\n    value = int(value.item()) ^ int(bitmask)\n    return value\n\nclass Conv2dEI(nn.Conv2d):\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n                 padding=0, dilation=1, groups=1, bias=True, **kwargs):\n        super(Conv2dEI, self).__init__(\n            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\n            stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)\n        self.kwargs = get_default_kwargs(kwargs)\n        self.tmr = self.kwargs['tmr']\n        self.ei_prob = self.kwargs['ei_prob']\n        self.nbits = self.kwargs['nbits']\n        self.nw = self.kwargs['nw']\n        self.ei_bits = self.kwargs['ei_bits']\n\n        if self.nbits <= 0:\n            self.register_buffer('radix_position', None)\n            self.register_buffer('init_state', None)\n        else:\n            # two radix_positions: (input, weight)\n            self.register_buffer('radix_position', torch.zeros(2))\n            self.register_buffer('init_state', 
torch.zeros(1))\n\n    def forward(self, x):\n        if self.nbits <= 0:\n            return self._conv_forward(x, self.weight)\n\n        Qn = -2 ** (self.nbits - 1)\n        Qp = 2 ** (self.nbits - 1) - 1\n\n        if self.init_state == 0:\n            il = torch.log2(self.weight.abs().max()) + 1\n            il = math.ceil(il - 1e-5)\n            self.radix_position[1].data.fill_(self.nbits - il)\n            print('Initialize radix position of {} weight with {}'.format(\n                self._get_name(), int(self.radix_position[1].item())))\n\n            il = torch.log2(x.abs().max()) + 1\n            il = math.ceil(il - 1e-5)\n            self.radix_position[0].data.fill_(self.nbits - il)\n            print('Initialize radix position of {} input with {}'.format(\n                self._get_name(), int(self.radix_position[0].item())))\n            self.init_state.fill_(1)\n\n        alpha_i = 2 ** self.radix_position[0]\n        alpha_w = 2 ** self.radix_position[1]\n        x_int = round_pass((x * alpha_i).clamp(Qn, Qp))\n        w_int = round_pass((self.weight * alpha_w).clamp(Qn, Qp))\n\n        x_int_ei = EI(x_int, self.ei_prob * 2)\n        w_int_ei = EI(w_int, self.ei_prob)\n\n        # tmr here\n\n        x_ei = x_int_ei / alpha_i\n        w_ei = w_int_ei / alpha_w\n        return self._conv_forward(x_ei, w_ei)\n\n    def extra_repr(self):\n        s_prefix = super(Conv2dEI, self).extra_repr()\n        return '{}, {}'.format(s_prefix, self.kwargs)\n\n\nclass LinearEI(nn.Linear):\n    def __init__(self, in_features, out_features, bias=True, **kwargs):\n        super(LinearEI, self).__init__(in_features=in_features, out_features=out_features, bias=bias)\n        self.kwargs = get_default_kwargs(kwargs)\n        self.tmr = self.kwargs['tmr']\n        self.ei_prob = self.kwargs['ei_prob']\n        self.nbits = self.kwargs['nbits']\n        \n        self.nw = self.kwargs['nw']\n        self.ei_bits = self.kwargs['ei_bits']\n\n        self.bound1 = self.kwargs['bound1']\n        self.bound2 = self.kwargs['bound2']\n        self.wg_flag = self.kwargs['wg_flag']\n        self.wg_sort = self.kwargs['wg_sort'] # whether to sort before splitting into groups\n        self.channel_flag = self.kwargs['channel_flag'] # whether to use EI_channel\n        self.tmr_array = self.kwargs['tmr_array'] # parameter that must be passed when EI_channel is used\n        \n        if self.nbits <= 0:\n            self.register_buffer('radix_position', None)\n            self.register_buffer('init_state', None)\n        else:\n            # two radix_positions: (input, weight)\n            self.register_buffer('radix_position', torch.zeros(2))\n            self.register_buffer('init_state', torch.zeros(1))\n\n    def forward(self, x):\n        if self.nbits <= 0:\n            return F.linear(x, self.weight, self.bias)\n\n        Qn = -2 ** (self.nbits - 1)\n        Qp = 2 ** (self.nbits - 1) - 1\n        if self.init_state == 0 and x.shape[0] >= 32: # batch_size >= 32\n            il = torch.log2(self.weight.abs().max()) + 1\n            il = math.ceil(il - 1e-5)\n            self.radix_position[1].data.fill_(self.nbits - il)\n            print('Initialize radix position of {} weight with {}'.format(\n                self._get_name(), int(self.radix_position[1].item())))\n\n            # batch size; remove sqrt[bs] outliers. 
topk\n batch_size = x.shape[0]\n il = torch.log2(x.abs().view(-1).topk(int(math.sqrt(batch_size)))[0][-1]) + 1\n il = math.ceil(il - 1e-5)\n self.radix_position[0].data.fill_(self.nbits - il)\n print('Initialize radix position of {} input with {}'.format(\n self._get_name(), int(self.radix_position[0].item())))\n self.init_state.fill_(1)\n\n alpha_i = 2 ** self.radix_position[0]\n alpha_w = 2 ** self.radix_position[1]\n x_int = round_pass((x * alpha_i).clamp(Qn, Qp))\n w_int = round_pass((self.weight * alpha_w).clamp(Qn, Qp))\n\n if self.ei_bits and self.nw >= 0:\n w_int_ei = EI_bit(w_int, self.nw, self.ei_prob)\n w_ei = w_int_ei / alpha_w\n return F.linear(x_int / alpha_i, w_ei, self.bias)\n\n if self.wg_flag:\n w_int_ei = EI_w(w_int, self.ei_prob, self.bound1, self.bound2, self.wg_sort)\n w_ei = w_int_ei / alpha_w\n return F.linear(x_int / alpha_i, w_ei, self.bias)\n\n if self.channel_flag:\n w_int_ei = EI_channel(w_int, self.ei_prob, self.tmr_array, self.in_features)\n w_ei = w_int_ei / alpha_w\n return F.linear(x_int / alpha_i, w_ei, self.bias)\n\n x_int_ei = EI(x_int, self.ei_prob * 2)\n w_int_ei = EI(w_int, self.ei_prob)\n if self.tmr:\n x2 = EI(x_int, self.ei_prob * 2)\n x3 = EI(x_int, self.ei_prob * 2)\n x_int_ei = TMR(x_int_ei, x2, x3)\n w2 = EI(w_int, self.ei_prob)\n w3 = EI(w_int, self.ei_prob)\n w_int_ei = TMR(w_int_ei, w2, w3)\n x_ei = x_int_ei / alpha_i\n w_ei = w_int_ei / alpha_w\n return F.linear(x_ei, w_ei, self.bias)\n\n def set_ei_prob(self, value):\n self.ei_prob = value\n self.kwargs['ei_prob'] = value\n\n def set_tmr(self, value):\n self.tmr = value\n self.kwargs['tmr'] = value\n\n def set_ei_bits(self, value):\n self.ei_bits = value\n self.kwargs['ei_bits'] = value\n\n def set_wg_flag(self, value):\n self.wg_flag = value\n self.kwargs['wg_flag'] = value\n\n def set_wg_sort(self, value):\n self.wg_sort = value\n self.kwargs['wg_sort'] = value\n\n def set_nw(self, value):\n self.nw = value\n self.kwargs['nw'] = value\n\n def set_bound(self, value1, value2):\n self.bound1 = value1\n self.kwargs['bound1'] = value1\n self.bound2 = value2\n self.kwargs['bound2'] = value2\n\n def set_channel_flag(self, value):\n self.channel_flag = value\n self.kwargs['channel_flag'] = value\n\n def set_tmr_array(self, value):\n self.tmr_array = value\n self.kwargs['tmr_array'] = value\n\n def extra_repr(self):\n s_prefix = super(LinearEI, self).extra_repr()\n return '{}, {}'.format(s_prefix, self.kwargs)\n\n\ndef round_pass(x):\n y = x.round()\n y_grad = x\n return y.detach() - y_grad.detach() + y_grad\n"
]
| [
[
"torch.zeros",
"torch.nn.functional.linear",
"torch.sort"
]
]
|
geeks121/coinbase-exchange-order-book | [
"4eae6025e567dd50ba1ac2b9727e0e75b6bf2e40"
]
| [
"main.py"
]
| [
"import asyncio\nfrom datetime import datetime\nfrom decimal import Decimal\nimport argparse\n\nimport functools\nimport pytz\nfrom trading import file_logger as trading_file_logger\nfrom orderbook import file_logger as order_book_file_logger\nimport numpy\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nimport logging\nfrom pprint import pformat\nimport random\nfrom socket import gaierror\nimport time\n\nfrom dateutil.tz import tzlocal\nimport websockets\n\nfrom trading.openorders import OpenOrders\nfrom trading.spreads import Spreads\nfrom orderbook.book import Book\nfrom trading.strategies import buyer_strategy\n\nARGS = argparse.ArgumentParser(description='Coinbase Exchange bot.')\nARGS.add_argument('--c', action='store_true', dest='command_line', default=False, help='Command line output')\nARGS.add_argument('--t', action='store_true', dest='trading', default=False, help='Trade')\nargs = ARGS.parse_args()\n\norder_book = Book()\nopen_orders = OpenOrders()\nopen_orders.cancel_all()\nspreads = Spreads()\n\n\[email protected]\ndef websocket_to_order_book():\n try:\n coinbase_websocket = yield from websockets.connect(\"wss://ws-feed.pro.coinbase.com\")\n except gaierror:\n order_book_file_logger.error('socket.gaierror - had a problem connecting to Coinbase feed')\n return\n\n yield from coinbase_websocket.send('{\"type\": \"subscribe\", \"product_id\": \"BTC-USD\"}')\n\n messages = []\n while True:\n message = yield from coinbase_websocket.recv()\n message = json.loads(message)\n messages += [message]\n if len(messages) > 20:\n break\n\n order_book.get_level3()\n\n [order_book.process_message(message) for message in messages if message['sequence'] > order_book.level3_sequence]\n messages = []\n while True:\n message = yield from coinbase_websocket.recv()\n if message is None:\n order_book_file_logger.error('Websocket message is None.')\n return False\n try:\n message = json.loads(message)\n except TypeError:\n order_book_file_logger.error('JSON did not load, see ' + str(message))\n return False\n if args.command_line:\n messages += [datetime.strptime(message['time'], '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=pytz.UTC)]\n messages = [message for message in messages if (datetime.now(tzlocal()) - message).seconds < 60]\n if len(messages) > 2:\n diff = numpy.diff(messages)\n diff = [float(sec.microseconds) for sec in diff]\n order_book.average_rate = numpy.mean(diff)\n order_book.fastest_rate = min(diff)\n order_book.slowest_rate = max(diff)\n if not order_book.process_message(message):\n print(pformat(message))\n return False\n if args.trading:\n if 'order_id' in message and message['order_id'] == open_orders.open_ask_order_id:\n if message['type'] == 'done':\n open_orders.open_ask_order_id = None\n open_orders.open_ask_price = None\n open_orders.open_ask_status = None\n open_orders.open_ask_rejections = Decimal('0.0')\n open_orders.open_ask_cancelled = False\n else:\n open_orders.open_ask_status = message['type']\n elif 'order_id' in message and message['order_id'] == open_orders.open_bid_order_id:\n if message['type'] == 'done':\n open_orders.open_bid_order_id = None\n open_orders.open_bid_price = None\n open_orders.open_bid_status = None\n open_orders.open_bid_rejections = Decimal('0.0')\n open_orders.open_bid_cancelled = False\n else:\n open_orders.open_bid_status = message['type']\n\n\ndef update_balances():\n while True:\n open_orders.get_balances()\n time.sleep(30)\n\n\ndef update_orders():\n time.sleep(5)\n open_orders.cancel_all()\n while True:\n 
open_orders.get_open_orders()\n time.sleep(60*5)\n\n\ndef monitor():\n time.sleep(5)\n while True:\n time.sleep(0.001)\n print('Last message: {0:.6f} secs, '\n 'Min ask: {1:.2f}, Max bid: {2:.2f}, Spread: {3:.2f}, '\n 'Your ask: {4:.2f}, Your bid: {5:.2f}, Your spread: {6:.2f} '\n 'Avg: {7:.10f} Min: {8:.10f} Max: {9:.10f}'.format(\n ((datetime.now(tzlocal()) - order_book.last_time).microseconds * 1e-6),\n order_book.asks.price_tree.min_key(), order_book.bids.price_tree.max_key(),\n order_book.asks.price_tree.min_key() - order_book.bids.price_tree.max_key(),\n open_orders.decimal_open_ask_price, open_orders.decimal_open_bid_price,\n open_orders.decimal_open_ask_price - open_orders.decimal_open_bid_price,\n order_book.average_rate*1e-6, order_book.fastest_rate*1e-6, order_book.slowest_rate*1e-6), end='\\r')\n\n\nif __name__ == '__main__':\n if args.command_line:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('\\n%(asctime)s, %(levelname)s, %(message)s'))\n stream_handler.setLevel(logging.INFO)\n trading_file_logger.addHandler(stream_handler)\n order_book_file_logger.addHandler(stream_handler)\n command_line = True\n\n loop = asyncio.get_event_loop()\n if args.trading:\n loop.run_in_executor(None, functools.partial(buyer_strategy, order_book, open_orders, spreads))\n loop.run_in_executor(None, update_balances)\n loop.run_in_executor(None, update_orders)\n if args.command_line:\n loop.run_in_executor(None, monitor)\n n = 0\n while True:\n start_time = loop.time()\n loop.run_until_complete(websocket_to_order_book())\n end_time = loop.time()\n seconds = end_time - start_time\n if seconds < 2:\n n += 1\n sleep_time = (2 ** n) + (random.randint(0, 1000) / 1000)\n order_book_file_logger.error('Websocket connectivity problem, going to sleep for {0}'.format(sleep_time))\n time.sleep(sleep_time)\n if n > 6:\n n = 0\n"
]
| [
[
"numpy.mean",
"numpy.diff"
]
]
|
maple-research-lab/AdCo | [
"a9f25fc18c12df88c732b33700f3bb698454dd3f"
]
| [
"training/train.py"
]
| [
"import argparse\nimport builtins\nimport math\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom training.train_utils import AverageMeter, ProgressMeter, accuracy\n\ndef update_network(model,images,args,Memory_Bank,losses,top1,top5,optimizer,criterion,mem_losses):\n # update network\n # negative logits: NxK\n\n q, k = model(im_q=images[0], im_k=images[1])\n k = concat_all_gather(k)\n l_pos = torch.einsum('nc,ck->nk', [q, k.T])\n\n d_norm, d, l_neg = Memory_Bank(q)\n\n # logits: Nx(1+K)\n\n logits = torch.cat([l_pos, l_neg], dim=1)\n logits /= args.moco_t\n\n cur_batch_size = logits.shape[0]\n cur_gpu = args.gpu\n choose_match = cur_gpu * cur_batch_size\n labels = torch.arange(choose_match, choose_match + cur_batch_size, dtype=torch.long).cuda()\n total_bsize=logits.shape[1]-args.cluster\n loss = criterion(logits, labels)\n\n # acc1/acc5 are (K+1)-way contrast classifier accuracy\n # measure accuracy and record loss\n acc1, acc5 = accuracy(logits, labels, topk=(1, 5))\n losses.update(loss.item(), images[0].size(0))\n top1.update(acc1.item(), images[0].size(0))\n top5.update(acc5.item(), images[0].size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # update memory bank\n with torch.no_grad():\n logits = torch.cat([l_pos, l_neg], dim=1) / args.mem_t\n p_qd=nn.functional.softmax(logits, dim=1)[:,total_bsize:]\n g = torch.einsum('cn,nk->ck',[q.T,p_qd])/logits.shape[0] - torch.mul(torch.mean(torch.mul(p_qd,l_neg),dim=0),d_norm)\n g = -torch.div(g,torch.norm(d,dim=0))/args.mem_t # c*k\n g = all_reduce(g) / torch.distributed.get_world_size()\n Memory_Bank.v.data = args.momentum * Memory_Bank.v.data + g + args.mem_wd * Memory_Bank.W.data\n Memory_Bank.W.data = Memory_Bank.W.data - args.memory_lr * Memory_Bank.v.data\n logits=torch.softmax(logits,dim=1)\n batch_prob=torch.sum(logits[:,:logits.size(0)],dim=1)\n batch_prob=torch.mean(batch_prob)\n mem_losses.update(batch_prob.item(),logits.size(0))\n return l_neg,logits\n\ndef update_sym_network(model,images,args,Memory_Bank,losses,top1,top5,optimizer,criterion,mem_losses):\n # update network\n # negative logits: NxK\n model.zero_grad()\n q_pred, k_pred, q, k = model(im_q=images[0], im_k=images[1])\n q = concat_all_gather(q)\n k = concat_all_gather(k)\n l_pos1 = torch.einsum('nc,ck->nk', [q_pred, k.T])\n l_pos2=torch.einsum('nc,ck->nk', [k_pred, q.T])\n \n d_norm1, d1, l_neg1 = Memory_Bank(q_pred)\n d_norm2, d2, l_neg2 = Memory_Bank(k_pred)\n # logits: Nx(1+K)\n\n logits1 = torch.cat([l_pos1, l_neg1], dim=1)\n logits1 /= args.moco_t\n logits2 = torch.cat([l_pos2, l_neg2], dim=1)\n logits2 /= args.moco_t\n\n cur_batch_size=logits1.shape[0]\n cur_gpu=args.gpu\n choose_match=cur_gpu*cur_batch_size\n labels=torch.arange(choose_match,choose_match+cur_batch_size,dtype=torch.long).cuda()\n\n loss = 0.5*criterion(logits1, labels)+0.5*criterion(logits2, labels)\n\n\n # acc1/acc5 are (K+1)-way contrast classifier accuracy\n # measure accuracy and record loss\n acc1, acc5 = accuracy(logits1, labels, topk=(1, 5))\n losses.update(loss.item(), images[0].size(0))\n top1.update(acc1.item(), 
images[0].size(0))\n top5.update(acc5.item(), images[0].size(0))\n acc1, acc5 = accuracy(logits2, labels, topk=(1, 5))\n losses.update(loss.item(), images[0].size(0))\n top1.update(acc1.item(), images[0].size(0))\n top5.update(acc5.item(), images[0].size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # update memory bank\n with torch.no_grad():\n # update memory bank\n\n # logits: Nx(1+K)\n\n logits1 = torch.cat([l_pos1, l_neg1], dim=1)\n logits1 /= args.mem_t\n # negative logits: NxK\n # logits: Nx(1+K)\n\n logits2 = torch.cat([l_pos2, l_neg2], dim=1)\n logits2 /= args.mem_t\n total_bsize = logits1.shape[1] - args.cluster\n p_qd1 = nn.functional.softmax(logits1, dim=1)[:, total_bsize:]\n g1 = torch.einsum('cn,nk->ck', [q_pred.T, p_qd1]) / logits1.shape[0] - torch.mul(\n torch.mean(torch.mul(p_qd1, l_neg1), dim=0), d_norm1)\n p_qd2 = nn.functional.softmax(logits2, dim=1)[:, total_bsize:]\n g2 = torch.einsum('cn,nk->ck', [k_pred.T, p_qd2]) / logits2.shape[0] - torch.mul(\n torch.mean(torch.mul(p_qd2, l_neg2), dim=0), d_norm1)\n g = -0.5*torch.div(g1, torch.norm(d1, dim=0)) / args.mem_t - 0.5*torch.div(g2,\n torch.norm(d1, dim=0)) / args.mem_t # c*k\n g = all_reduce(g) / torch.distributed.get_world_size()\n Memory_Bank.v.data = args.momentum * Memory_Bank.v.data + g + args.mem_wd * Memory_Bank.W.data\n Memory_Bank.W.data = Memory_Bank.W.data - args.memory_lr * Memory_Bank.v.data\n logits1 = torch.softmax(logits1, dim=1)\n batch_prob1 = torch.sum(logits1[:, :logits1.size(0)], dim=1)\n logits2 = torch.softmax(logits2, dim=1)\n batch_prob2 = torch.sum(logits2[:, :logits2.size(0)], dim=1)\n batch_prob = 0.5 * torch.mean(batch_prob1) + 0.5 * torch.mean(batch_prob2)\n mem_losses.update(batch_prob.item(), logits1.size(0))\n return l_neg1,logits1\n\ndef update_network_multi(model,images,args,Memory_Bank,losses,top1,top5,optimizer,criterion,mem_losses):\n # update network\n # negative logits: NxK\n image_size = len(images)\n q_list, k = model(im_q=images[1:image_size], im_k=images[0])\n k = concat_all_gather(k)\n l_pos_list = []\n for q in q_list:\n # l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n l_pos = torch.einsum('nc,ck->nk', [q, k.T])\n l_pos_list.append(l_pos)\n\n d_norm, d, l_neg_list = Memory_Bank(q_list)\n\n loss = 0\n cur_batch_size = l_pos_list[0].shape[0]\n cur_gpu = args.gpu\n choose_match = cur_gpu * cur_batch_size\n labels = torch.arange(choose_match, choose_match + cur_batch_size, dtype=torch.long).cuda()\n for k in range(len(l_pos_list)):\n logits = torch.cat([l_pos_list[k], l_neg_list[k]], dim=1)\n logits /= args.moco_t\n loss += criterion(logits, labels)\n if k == 0:\n # acc1/acc5 are (K+1)-way contrast classifier accuracy\n # measure accuracy and record loss\n acc1, acc5 = accuracy(logits, labels, topk=(1, 5))\n losses.update(loss.item(), images[0].size(0))\n top1.update(acc1.item(), images[0].size(0))\n top5.update(acc5.item(), images[0].size(0))\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # update memory bank\n g_sum = 0\n with torch.no_grad():\n for k in range(len(l_pos_list)):\n logits = torch.cat([l_pos_list[k], l_neg_list[k]], dim=1) / args.mem_t\n total_bsize = logits.shape[1] - args.cluster\n p_qd = nn.functional.softmax(logits, dim=1)[:, total_bsize:] # n*k\n g = torch.einsum('cn,nk->ck', [q_list[k].T, p_qd]) / logits.shape[0] - torch.mul(\n torch.mean(torch.mul(p_qd, l_neg_list[k]), dim=0),\n d_norm)\n g_sum += -torch.div(g, torch.norm(d, dim=0)) 
/ args.mem_t # c*k\n if k == 0:\n logits = torch.softmax(logits, dim=1)\n batch_prob = torch.sum(logits[:, :logits.size(0)], dim=1)\n batch_prob = torch.mean(batch_prob)\n mem_losses.update(batch_prob.item(), logits.size(0))\n g_sum = all_reduce(g_sum) / torch.distributed.get_world_size()\n Memory_Bank.v.data = args.momentum * Memory_Bank.v.data + g_sum + args.mem_wd * Memory_Bank.W.data\n Memory_Bank.W.data = Memory_Bank.W.data - args.memory_lr * Memory_Bank.v.data\n\n\ndef train(train_loader, model,Memory_Bank, criterion,\n optimizer,epoch, args):\n\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n mem_losses = AverageMeter('MemLoss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, mem_losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, _) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n for k in range(len(images)):\n images[k] = images[k].cuda(args.gpu, non_blocking=True)\n batch_size=images[0].size(0)\n if args.multi_crop:\n if i==0:\n print(\"in total %d multi crops adopted\"%len(images))\n update_network_multi(model, images, args, Memory_Bank, losses, top1, top5, optimizer, criterion,mem_losses)\n elif not args.sym:\n update_network(model, images, args, Memory_Bank, losses, top1, top5, optimizer, criterion,mem_losses)\n else:\n update_sym_network(model, images, args, Memory_Bank, losses, top1, top5, optimizer, criterion, mem_losses)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n return top1.avg\n\ndef init_memory(train_loader, model,Memory_Bank, criterion,\n optimizer,epoch, args):\n \n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n # switch to train mode\n model.train()\n for i, (images, _) in enumerate(train_loader):\n # measure data loading time\n if args.gpu is not None:\n for k in range(len(images)):\n images[k] = images[k].cuda(args.gpu, non_blocking=True)\n \n # compute output\n if args.multi_crop:\n q_list, k = model(im_q=images[0:-1], im_k=images[-1])\n q = q_list[0]\n elif not args.sym:\n q, k = model(im_q=images[0], im_k=images[1])\n else:\n q, _, _, k = model(im_q=images[0], im_k=images[1])\n d_norm, d, l_neg = Memory_Bank(q, init_mem=True)\n\n l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n # logits: Nx(1+K)\n\n logits = torch.cat([l_pos, l_neg], dim=1)\n logits /= args.moco_t\n labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()\n loss = criterion(logits, labels)\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n acc1, acc5 = accuracy(logits, labels, topk=(1, 5))\n \n losses.update(loss.item(), images[0].size(0))\n top1.update(acc1.item(), images[0].size(0))\n top5.update(acc5.item(), images[0].size(0))\n if i % args.print_freq == 0:\n progress.display(i)\n\n # fill the memory bank\n output = concat_all_gather(k)\n batch_size = output.size(0)\n start_point = i * batch_size\n end_point = min((i + 1) * batch_size, args.cluster)\n Memory_Bank.W.data[:, 
start_point:end_point] = output[:end_point - start_point].T\n if (i+1) * batch_size >= args.cluster:\n break\n for param_q, param_k in zip(model.module.encoder_q.parameters(),\n model.module.encoder_k.parameters()):\n param_k.data.copy_(param_q.data) # initialize\n\[email protected]_grad()\ndef all_reduce(tensor):\n \"\"\"\n Performs all_reduce(mean) operation on the provided tensors.\n *** Warning ***: torch.distributed.all_reduce has no gradient.\n \"\"\"\n torch.distributed.all_reduce(tensor, async_op=False)\n\n return tensor\n\[email protected]_grad()\ndef concat_all_gather(tensor):\n \"\"\"\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n \"\"\"\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output"
]
| [
[
"torch.zeros",
"torch.distributed.get_world_size",
"torch.cat",
"torch.mul",
"torch.einsum",
"torch.arange",
"torch.norm",
"torch.no_grad",
"torch.distributed.all_gather",
"torch.softmax",
"torch.distributed.all_reduce",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.mean"
]
]
|
tkc-morita/seq2seq_abcd-vae | [
"9da0b81a0f15554ed4915f1a4c7a5b770c5197ac"
]
| [
"ABCD-VAE/encode_logit.py"
]
| [
"# coding: utf-8\n\nimport torch\nfrom modules.data_utils import Compose\nimport numpy as np\nimport pandas as pd\nfrom modules import data_utils\nimport learning\nimport os, argparse, itertools\n\n\nclass Encoder(learning.Learner):\n\tdef __init__(self, model_config_path, device = 'cpu'):\n\t\tself.device = torch.device(device)\n\t\tself.retrieve_model(checkpoint_path = model_config_path, device=device)\n\t\tfor param in self.parameters():\n\t\t\tparam.requires_grad = False\n\t\tself.encoder.eval() # Turn off dropout\n\t\tself.feature_sampler.eval()\n\t\tself.decoder.eval()\n\n\n\tdef encode(self, data, is_packed = False, to_numpy = True):\n\t\tif not is_packed:\n\t\t\tif not isinstance(data, list):\n\t\t\t\tdata = [data]\n\t\t\tdata = torch.nn.utils.rnn.pack_sequence(data)\n\t\twith torch.no_grad():\n\t\t\tdata = data.to(self.device)\n\t\t\tlast_hidden = self.encoder(data)\n\t\t\tlogits = self.feature_sampler(last_hidden)\n\t\tif to_numpy:\n\t\t\tlogits = (p.data.cpu().numpy() for p in logits)\n\t\treturn logits\n\n\n\tdef encode_dataset(self, dataset, save_path, to_numpy = True, batch_size=1):\n\t\tdataloader = data_utils.DataLoader(dataset, batch_size=batch_size)\n\t\trename_existing_file(save_path)\n\t\tif 'label' in dataset.df_annotation.columns:\n\t\t\tdf_ann = dataset.df_annotation.drop(columns=['onset_ix','offset_ix','length'])\n\t\telse:\n\t\t\tdf_ann = None\n\t\tfor data, _, _, ix_in_list in dataloader:\n\t\t\tlogits = self.encode(data, is_packed=True, to_numpy=to_numpy)\n\t\t\tdf_encoded = pd.DataFrame(logits)\n\t\t\tdf_encoded.loc[:,'data_ix'] = ix_in_list\n\t\t\tdf_encoded = df_encoded.melt(id_vars=['data_ix'], var_name='dimension', value_name='logit')\n\t\t\tif not df_ann is None:\n\t\t\t\tdf_encoded = df_encoded.merge(df_ann, how='left', left_on='data_ix', right_index=True)\n\t\t\tif os.path.isfile(save_path):\n\t\t\t\tdf_encoded.to_csv(save_path, index=False, mode='a', header=False)\n\t\t\telse:\n\t\t\t\tdf_encoded.to_csv(save_path, index=False)\n\ndef rename_existing_file(filepath):\n\tif os.path.isfile(filepath):\n\t\tnew_path = filepath+'.prev'\n\t\trename_existing_file(new_path)\n\t\tos.rename(filepath, new_path)\n\ndef get_parameters():\n\tpar_parser = argparse.ArgumentParser()\n\n\tpar_parser.add_argument('model_path', type=str, help='Path to the configuration file of a trained model.')\n\tpar_parser.add_argument('input_root', type=str, help='Path to the root directory under which inputs are located.')\n\tpar_parser.add_argument('annotation_file', type=str, help='Path to the annotation csv file.')\n\tpar_parser.add_argument('data_normalizer', type=float, help='Normalizing constant to devide the data.')\n\tpar_parser.add_argument('--annotation_sep', type=str, default=',', help='Separator symbol of the annotation file. Comma \",\" by default (i.e., csv).')\n\tpar_parser.add_argument('-d', '--device', type=str, default='cpu', help='Computing device.')\n\tpar_parser.add_argument('-S', '--save_path', type=str, default=None, help='Path to the file where results are saved.')\n\tpar_parser.add_argument('--fft_frame_length', type=float, default=0.008, help='FFT frame length in sec.')\n\tpar_parser.add_argument('--fft_step_size', type=float, default=0.004, help='FFT step size in sec.')\n\tpar_parser.add_argument('--fft_window_type', type=str, default='hann_window', help='Window type for FFT. 
\"hann_window\" by default.')\n\tpar_parser.add_argument('--fft_no_centering', action='store_true', help='If selected, no centering in FFT.')\n\tpar_parser.add_argument('--channel', type=int, default=0, help='Channel ID # (starting from 0) of multichannel recordings to use.')\n\tpar_parser.add_argument('-E','--epsilon', type=float, default=2**(-15), help='Small positive real number to add to avoid log(0).')\n\tpar_parser.add_argument('-b', '--batch_size', type=int, default=1, help='Batch size.')\n\n\treturn par_parser.parse_args()\n\nif __name__ == '__main__':\n\tparameters = get_parameters()\n\n\tsave_path = parameters.save_path\n\tif save_path is None:\n\t\tsave_path = os.path.join(parameters.input_root, 'autoencoded.csv')\n\tsave_dir = os.path.dirname(save_path)\n\tif not os.path.isdir(save_dir):\n\t\tos.makedirs(save_dir)\n\n\tdata_parser = data_utils.Data_Parser(parameters.input_root, parameters.annotation_file, annotation_sep=parameters.annotation_sep)\n\tfs = data_parser.get_sample_freq() # Assuming all the wav files have the same fs, get the 1st file's.\n\n\tfft_frame_length = int(np.floor(parameters.fft_frame_length * fs))\n\tfft_step_size = int(np.floor(parameters.fft_step_size * fs))\n\n\t# Get a model.\n\tencoder = Encoder(parameters.model_path, device=parameters.device)\n\n\tto_tensor = data_utils.ToTensor()\n\tstft = data_utils.STFT(fft_frame_length, fft_step_size, window=parameters.fft_window_type, centering=not parameters.fft_no_centering)\n\tlog_and_normalize = data_utils.Transform(lambda x: (x + parameters.epsilon).log() / parameters.data_normalizer)\n\n\tdataset = data_parser.get_data(transform=Compose([to_tensor,stft,log_and_normalize]), channel=parameters.channel)\n\n\tencoder.encode_dataset(dataset, save_path, batch_size=parameters.batch_size)\n\n"
]
| [
[
"torch.device",
"torch.nn.utils.rnn.pack_sequence",
"pandas.DataFrame",
"torch.no_grad",
"numpy.floor"
]
]
|
dreamfyrian/imitation | [
"682bc4b919baf57bdd959ac646caba21d92cdf71"
]
| [
"src/imitation/policies/exploration_wrapper.py"
]
| [
"\"\"\"Wrapper to turn a policy into a more exploratory version.\"\"\"\n\nfrom typing import Optional\n\nimport numpy as np\nfrom stable_baselines3.common import vec_env\n\nfrom imitation.data import rollout\n\n\nclass ExplorationWrapper:\n \"\"\"Wraps a PolicyCallable to create a partially randomized version.\n\n This wrapper randomly switches between two policies: the wrapped policy,\n and a random one. After each action, the current policy is kept\n with a certain probability. Otherwise, one of these two policies is chosen\n at random (without any dependence on what the current policy is).\n\n The random policy uses the `action_space.sample()` method.\n \"\"\"\n\n def __init__(\n self,\n policy: rollout.PolicyCallable,\n venv: vec_env.VecEnv,\n random_prob: float,\n stay_prob: float,\n seed: Optional[int] = None,\n ):\n \"\"\"Initializes the ExplorationWrapper.\n\n Args:\n policy: The policy to randomize.\n venv: The environment to use (needed for sampling random actions).\n random_prob: The probability of picking the random policy when switching.\n stay_prob: The probability of staying with the current policy.\n seed: The random seed to use.\n \"\"\"\n self.wrapped_policy = policy\n self.random_prob = random_prob\n self.stay_prob = stay_prob\n self.venv = venv\n\n self.rng = np.random.RandomState(seed)\n self.venv.action_space.seed(seed)\n\n self.current_policy = policy\n # Choose the initial policy at random\n self._switch()\n\n def _random_policy(self, obs: np.ndarray) -> np.ndarray:\n acts = [self.venv.action_space.sample() for _ in range(len(obs))]\n return np.stack(acts, axis=0)\n\n def _switch(self) -> None:\n \"\"\"Pick a new policy at random.\"\"\"\n if self.rng.rand() < self.random_prob:\n self.current_policy = self._random_policy\n else:\n self.current_policy = self.wrapped_policy\n\n def __call__(self, obs: np.ndarray) -> np.ndarray:\n acts = self.current_policy(obs)\n if self.rng.rand() < self.stay_prob:\n self._switch()\n return acts\n"
]
| [
[
"numpy.stack",
"numpy.random.RandomState"
]
]
|
kernsuite-debian/bifrost | [
"04be6ba3acb59358f9cb4ebecfa154002a2efe39"
]
| [
"test/test_reduce.py"
]
| [
"\n# Copyright (c) 2016, The Bifrost Authors. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of The Bifrost Authors nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport unittest\nimport numpy as np\nimport bifrost as bf\n#import time\n\ndef stderr(data, axis):\n return np.sum(data, axis=axis) / np.sqrt(data.shape[axis])\n\nNP_OPS = {\n 'sum': np.sum,\n 'mean': np.mean,\n 'min': np.min,\n 'max': np.max,\n 'stderr': stderr\n}\n\ndef scrunch(data, factor=2, axis=0, func=np.sum):\n if factor is None:\n factor = data.shape[axis]\n s = data.shape\n if s[axis] % factor != 0:\n raise ValueError(\"Scrunch factor does not divide axis size\")\n s = s[:axis] + (s[axis]//factor, factor) + s[axis:][1:]\n axis = axis + 1 if axis >= 0 else axis\n return func(data.reshape(s), axis=axis)\n\nclass ReduceTest(unittest.TestCase):\n def setUp(self):\n np.random.seed(1234)\n def run_reduce_test(self, shape, axis, n, op='sum', dtype=np.float32):\n a = ((np.random.random(size=shape)*2-1)*127).astype(np.int8).astype(dtype)\n b_gold = scrunch(a.astype(np.float32), n, axis, NP_OPS[op])\n a = bf.asarray(a, space='cuda')\n b = bf.empty_like(b_gold, space='cuda')\n bf.reduce(a, b, op)\n #for _ in xrange(10):\n # bf.reduce(a, b, op)\n #bf.device.stream_synchronize();\n #t0 = time.time()\n #nrep = 30\n #for _ in xrange(nrep):\n # bf.reduce(a, b, op)\n #bf.device.stream_synchronize();\n #dt = time.time() - t0\n #print nrep * (a.nbytes + b.nbytes) / dt / 1e9, 'GB/s', shape, axis, n, dtype\n b = b.copy('system')\n np.testing.assert_allclose(b, b_gold)\n def test_reduce(self):\n self.run_reduce_test((3,6,5), axis=1, n=2, op='sum', dtype=np.float32)\n for shape in [(20,20,40), (20,40,60), (40,100,200)]:\n for axis in xrange(3):\n for n in [2, 4, 5, 10, None]:\n for op in ['sum', 'mean']:#, 'min', 'max', 'stderr']:\n for dtype in [np.float32, np.int16, np.int8]:\n #print shape, axis, n, op, dtype\n self.run_reduce_test(shape, axis, n, op, dtype)\n def test_reduce_pow2(self):\n for shape in [(16,32,64), (16,64,256), (256,64,16)]:#, (256, 256, 512)]:\n for axis in xrange(3):\n for n in [2, 4, 8, 16, None]:\n for op in ['sum', 'mean']:#, 'min', 'max', 'stderr']:\n for dtype 
in [np.float32, np.int16, np.int8]:\n #print shape, axis, n, op, dtype\n self.run_reduce_test(shape, axis, n, op, dtype)\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.random.seed",
"numpy.sum",
"numpy.sqrt",
"numpy.random.random"
]
]
|
leomarkcastro/Image-Enhancer-DPIR | [
"58a99cfd3325dabab29b6f68ad96ae17d090445b"
]
| [
"main_dpir_denoising.py"
]
| [
"import os.path\nimport logging\n\nimport numpy as np\nfrom collections import OrderedDict\n\nimport torch\n\nfrom utils import utils_logger\nfrom utils import utils_model\nfrom utils import utils_image as util\n\n\n\"\"\"\nSpyder (Python 3.7)\nPyTorch 1.6.0\nWindows 10 or Linux\nKai Zhang ([email protected])\ngithub: https://github.com/cszn/DPIR\n https://github.com/cszn/IRCNN\n https://github.com/cszn/KAIR\n@article{zhang2020plug,\n title={Plug-and-Play Image Restoration with Deep Denoiser Prior},\n author={Zhang, Kai and Li, Yawei and Zuo, Wangmeng and Zhang, Lei and Van Gool, Luc and Timofte, Radu},\n journal={arXiv preprint},\n year={2020}\n}\n% If you have any question, please feel free to contact with me.\n% Kai Zhang (e-mail: [email protected]; homepage: https://cszn.github.io/)\nby Kai Zhang (01/August/2020)\n\n# --------------------------------------------\n|--model_zoo # model_zoo\n |--drunet_gray # model_name, for color images\n |--drunet_color\n|--testset # testsets\n |--set12 # testset_name\n |--bsd68\n |--cbsd68\n|--results # results\n |--set12_dn_drunet_gray # result_name = testset_name + '_' + 'dn' + model_name\n |--set12_dn_drunet_color\n# --------------------------------------------\n\"\"\"\n\n\ndef denoising(source_folder: list, source_result: list, denoise=200, model_to_use=\"drunet_color\"):\n\n # ----------------------------------------\n # Preparation\n # ----------------------------------------\n\n noise_level_img = float(denoise) # set AWGN noise level for noisy image\n noise_level_model = noise_level_img # set noise level for model\n model_name = model_to_use # set denoiser model, 'drunet_gray' | 'drunet_color'\n testset_name = source_folder[1] # set test set, 'bsd68' | 'cbsd68' | 'set12'\n x8 = False # default: False, x8 to boost performance\n show_img = False # default: False\n border = 0 # shave boader to calculate PSNR and SSIM\n\n if 'color' in model_name:\n n_channels = 3 # 3 for color image\n else:\n n_channels = 1 # 1 for grayscale image\n\n model_pool = 'model_zoo' # fixed\n testsets = source_folder[0] # fixed\n results = source_result[0] # fixed\n task_current = 'dn' # 'dn' for denoising\n result_name = source_result[1]\n\n model_path = os.path.join(model_pool, \"Copy of \"+model_name+'.pth')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.cuda.empty_cache()\n\n # ----------------------------------------\n # L_path, E_path, H_path\n # ----------------------------------------\n\n L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images\n E_path = os.path.join(results, result_name) # E_path, for Estimated images\n util.mkdir(E_path)\n\n logger_name = result_name\n utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))\n logger = logging.getLogger(logger_name)\n\n # ----------------------------------------\n # load model\n # ----------------------------------------\n\n from models.network_unet import UNetRes as net\n model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode=\"strideconv\", upsample_mode=\"convtranspose\")\n model.load_state_dict(torch.load(model_path), strict=True)\n model.eval()\n for k, v in model.named_parameters():\n v.requires_grad = False\n model = model.to(device)\n logger.info('Model path: {:s}'.format(model_path))\n number_parameters = sum(map(lambda x: x.numel(), model.parameters()))\n logger.info('Params number: {}'.format(number_parameters))\n\n test_results = OrderedDict()\n 
test_results['psnr'] = []\n    test_results['ssim'] = []\n\n    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(model_name, noise_level_img, noise_level_model))\n    logger.info(L_path)\n    L_paths = util.get_image_paths(L_path)\n\n    for idx, img in enumerate(L_paths):\n\n        # ------------------------------------\n        # (1) img_L\n        # ------------------------------------\n\n        img_name, ext = os.path.splitext(os.path.basename(img))\n        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))\n        img_H = util.imread_uint(img, n_channels=n_channels)\n        img_L = util.uint2single(img_H)\n\n        # Add noise without clipping\n        np.random.seed(seed=0)  # for reproducibility\n        img_L += np.random.normal(0, noise_level_img/255., img_L.shape)\n\n        util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None\n\n        img_L = util.single2tensor4(img_L)\n        img_L = torch.cat((img_L, torch.FloatTensor([noise_level_model/255.]).repeat(1, 1, img_L.shape[2], img_L.shape[3])), dim=1)\n        img_L = img_L.to(device)\n\n        # ------------------------------------\n        # (2) img_E\n        # ------------------------------------\n\n        if not x8 and img_L.size(2)%8==0 and img_L.size(3)%8==0:  # single forward pass only when H and W are multiples of 8\n            img_E = model(img_L)\n        elif not x8 and (img_L.size(2)%8!=0 or img_L.size(3)%8!=0):\n            img_E = utils_model.test_mode(model, img_L, refield=64, mode=5)\n        elif x8:\n            img_E = utils_model.test_mode(model, img_L, mode=3)\n\n        img_E = util.tensor2uint(img_E)\n\n        # --------------------------------\n        # PSNR and SSIM\n        # --------------------------------\n\n        if n_channels == 1:\n            img_H = img_H.squeeze() \n        psnr = util.calculate_psnr(img_E, img_H, border=border)\n        ssim = util.calculate_ssim(img_E, img_H, border=border)\n        test_results['psnr'].append(psnr)\n        test_results['ssim'].append(ssim)\n        logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name+ext, psnr, ssim))\n\n        # ------------------------------------\n        # save results\n        # ------------------------------------\n\n        util.imsave(img_E, os.path.join(E_path, img_name+ext))\n\n    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])\n    ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])\n    logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, ave_psnr, ave_ssim))\n\n\nif __name__ == '__main__':\n\n    # example invocation; the folder and result names below are placeholders\n    denoising(['testsets', 'bsd68'], ['results', 'bsd68_dn_drunet_color'])\n"
]
| [
[
"numpy.random.normal",
"numpy.random.seed",
"torch.FloatTensor",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.load"
]
]
|
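Note on the row above: the DPIR script conditions a single DRUNet on the target noise level by concatenating a constant noise-level map as an extra input channel (hence `in_nc=n_channels+1`). Below is a minimal, self-contained sketch of just that conditioning step; the tensor shapes and the sigma value are illustrative, not taken from this row.

import torch

# DRUNet-style conditioning: broadcast the scalar noise level to a constant
# map and concatenate it as an extra input channel (N x (C+1) x H x W).
img = torch.rand(1, 3, 64, 64)        # illustrative batch, values in [0, 1]
sigma = 25.0 / 255.0                  # noise level on the same scale as the image

noise_map = torch.full((1, 1, img.shape[2], img.shape[3]), sigma)
x = torch.cat((img, noise_map), dim=1)
assert x.shape == (1, 4, 64, 64)      # matches in_nc = n_channels + 1

This is why one checkpoint handles arbitrary AWGN levels: the network reads sigma from the extra channel instead of being trained per noise level.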
Benson0704/MSI_MSS_Prediction | [
"aea880db359f105157de9798c9b01883df0b361c"
]
| [
"getRatio.py"
]
| [
"import random\nfrom sklearn import linear_model\nfrom sklearn import neighbors\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import svm\nimport time\nimport openpyxl\nimport matplotlib.pyplot\nimport numpy\nimport scipy\nDO_RAW = 1\nDO_LR = 1\nDO_SVC = 0\nDO_KNN = 0\nLR_C = 0.0699\n\n\ndef paint_roc(label_list, predicted_list, output_str='test.png'):\n fpr, tpr, thersholds = metrics.roc_curve(\n label_list, predicted_list, pos_label=1)\n roc_auc = metrics.auc(fpr, tpr)\n result.writelines('{}: auc in roc painting is: {}'.format(\n output_str.split('.')[0], roc_auc))\n matplotlib.pyplot.plot(\n fpr, tpr, '-', label='ROC (area = {0:.4f})'.format(roc_auc), lw=1)\n\n matplotlib.pyplot.xlim([-0.05, 1.05]) # 设置x、y轴的上下限,以免和边缘重合,更好的观察图像的整体\n matplotlib.pyplot.ylim([-0.05, 1.05])\n matplotlib.pyplot.xlabel('False Positive Rate')\n matplotlib.pyplot.ylabel('True Positive Rate') # 可以使用中文,但需要导入一些库即字体\n matplotlib.pyplot.title('ROC Curve')\n matplotlib.pyplot.legend(loc=\"lower right\")\n matplotlib.pyplot.savefig(output_str.replace('.png', '.eps'), format='eps')\n matplotlib.pyplot.close()\n\n\ntrain = open('train.txt', 'r')\ntest = open('test.txt', 'r')\nresult = open('result.txt', 'w')\nresult.writelines(time.asctime(time.localtime())+' Starts!\\n')\ntrain_name = []\ntrain_score = []\ntrain_label = []\nfor line in train.readlines():\n train_name.append(line[:-1].split(' ')[0].split('/')[-1])\n train_score.append(float(line[:-1].split(' ')[1][:8]))\n train_label.append(int(line[:-1].split(' ')[2]))\ntrain.close()\ntest_name = []\ntest_score = []\ntest_label = []\nfor line in test.readlines():\n test_name.append(line[:-1].split(' ')[0].split('/')[-1])\n test_score.append(float(line[:-1].split(' ')[1][:8]))\n test_label.append(int(line[:-1].split(' ')[2]))\ntest.close()\n\n\n# raw 0.5 acc\nif DO_RAW:\n right = 0\n for i, _ in enumerate(test_label):\n right += ((test_score[i] >= 0.5) == test_label[i])\n raw_result = [int(x >= 0.5) for x in test_score]\n result.writelines('raw 0.5 acc: {:.6f}\\n'.format(right/len(test_label)))\n result.writelines('predict results:\\n'+str(raw_result) +\n '\\nwrong cases:(rank, score, label)\\n')\n for i, _ in enumerate(test_label):\n if test_label[i] != raw_result[i]:\n result.writelines(str((i, test_score[i], test_label[i]))+', ')\n paint_roc(test_label, test_score, 'raw.png')\n result.writelines('\\n')\n\n\ndef cross_validation(train_data, label):\n train_data0 = train_data[0:len(train_data)//5]\n train_data1 = train_data[len(train_data)//5:len(train_data)//5*2]\n train_data2 = train_data[len(train_data)//5*2:len(train_data)//5*3]\n train_data3 = train_data[len(train_data)//5*3:len(train_data)//5*4]\n train_data4 = train_data[len(train_data)//5*4:]\n train_datas = [train_data0, train_data1,\n train_data2, train_data3, train_data4]\n label0 = label[0:len(label)//5]\n label1 = label[len(label)//5:len(label)//5*2]\n label2 = label[len(label)//5*2:len(label)//5*3]\n label3 = label[len(label)//5*3:len(label)//5*4]\n label4 = label[len(label)//5*4:]\n labels = [label0, label1,\n label2, label3, label4]\n result = []\n for i in range(5):\n train_CV = []\n test_CV = []\n for x in range(5):\n if x != i:\n train_CV += train_datas[x]\n else:\n test_CV += train_datas[x]\n train_label = []\n test_label = []\n for x in range(5):\n if x != i:\n train_label += labels[x]\n else:\n test_label += labels[x]\n max_acc = 0\n max_acc_prob = 0\n for j in range(0, 1000):\n train_correct = 0\n train_total = 0\n train_result = []\n for x in 
train_CV:\n                train_result.append(x > j*0.001)\n            for x, _ in enumerate(train_result):\n                train_total += 1\n                train_correct += (train_result[x] == train_label[x])\n            if train_correct/train_total > max_acc:\n                max_acc = train_correct/train_total\n                max_acc_prob = j\n        test_correct = 0\n        test_total = 0\n        test_result = []\n        for x in test_CV:\n            test_result.append(x > max_acc_prob*0.001)\n        for x, _ in enumerate(test_result):\n            test_total += 1\n            test_correct += (test_result[x] == test_label[x])\n        result.append(max_acc_prob*0.001)\n        print('{}: prob: {} acc: {}'.format(\n            i, max_acc_prob*0.001, test_correct/test_total))\n    return result\n\n\nmax_acc = 0\nmax_acc_prob = 0\nr = cross_validation(train_score, train_label)\nfor j in range(0, 1000):\n    train_correct = 0\n    train_total = 0\n    train_result = []\n    for x in train_score:\n        train_result.append(x > j*0.001)\n    for x, _ in enumerate(train_result):\n        train_total += 1\n        train_correct += (train_result[x] == train_label[x])\n    if train_correct/train_total > max_acc:\n        max_acc = train_correct/train_total\n        max_acc_prob = j\nprint('max acc on train: ', max_acc, 'divided at: ', max_acc_prob*0.001)\n\nprint('0.500')\ntest_correct = 0\ntest_total = 0\ntest_result = []\nfor x in test_score:\n    test_result.append(x > 500*0.001)\nfor x, _ in enumerate(test_result):\n    test_total += 1\n    test_correct += (test_result[x] == test_label[x])\nprint('test acc: ', test_correct/test_total)\nprint('result: ', test_result)\nprint('label: ', test_label)\nprint('score: ', test_score)\nreal0to0 = 0\nreal0to1 = 0\nreal1to0 = 0\nreal1to1 = 0\nfor x, _ in enumerate(test_result):\n    if test_label[x] == 0:\n        if test_result[x] == 0:\n            real0to0 += 1\n        else:\n            real0to1 += 1\n    else:\n        if test_result[x] == 0:\n            real1to0 += 1\n        else:\n            real1to1 += 1\nprint(\n    '      predict0 predict1\\nreal0 {}        {}\\nreal1 {}        {}'.format(\n        real0to0, real0to1, real1to0, real1to1)\n)\nacc = []\nfor i in range(500):\n    pt = []\n    plt = []\n    test_correct = 0\n    test_total = 0\n    for j in range(len(test_label)):\n        x = random.randint(0, len(test_label)-1)\n        pt.append(test_result[x])\n        plt.append(test_label[x])\n    for x, _ in enumerate(pt):\n        test_total += 1\n        test_correct += (pt[x] == plt[x])\n    acc.append(test_correct/test_total)\nacc = numpy.array(acc)\nmean, std = acc.mean(), acc.std(ddof=1)\nprint('(bootstrap 500) mean is ' + str(mean))\nconf_interval = scipy.stats.norm.interval(0.95, loc=mean, scale=std)\nprint('conf_interval ' + str(conf_interval))\n\n# key step: precision_recall_curve() returns the precision, recall and threshold arrays\nprecision, recall, thresholds = metrics.precision_recall_curve(\n    test_label, test_score)\nprint(precision)\nprint(recall)\nmatplotlib.pyplot.plot(recall, precision, lw=1)\nmatplotlib.pyplot.plot([0, 1], [0, 1], '--',\n                       color=(0.6, 0.6, 0.6), label=\"Luck\")\t\t\t\t # draw the diagonal\nmatplotlib.pyplot.xlim([-0.05, 1.05])\nmatplotlib.pyplot.ylim([-0.05, 1.05])\nmatplotlib.pyplot.xlabel(\"Recall Rate\")\nmatplotlib.pyplot.ylabel(\"Precision Rate\")\nmatplotlib.pyplot.savefig('pr_0.5.eps')\nprint('0.307')\ntest_correct = 0\ntest_total = 0\ntest_result = []\nfor x in test_score:\n    test_result.append(x > max_acc_prob*0.001)\nfor x, _ in enumerate(test_result):\n    test_total += 1\n    test_correct += (test_result[x] == test_label[x])\nprint('test acc: ', test_correct/test_total)\nprint('result: ', test_result)\nprint('label: ', test_label)\nprint('score: ', test_score)\nreal0to0 = 0\nreal0to1 = 0\nreal1to0 = 0\nreal1to1 = 0\nfor x, _ in enumerate(test_result):\n    if test_label[x] == 0:\n        if test_result[x] == 0:\n            real0to0 += 1\n        else:\n            
real0to1 += 1\n    else:\n        if test_result[x] == 0:\n            real1to0 += 1\n        else:\n            real1to1 += 1\nprint(\n    '      predict0 predict1\\nreal0 {}        {}\\nreal1 {}        {}'.format(\n        real0to0, real0to1, real1to0, real1to1)\n)\nacc = []\nfor i in range(500):\n    pt = []\n    plt = []\n    test_correct = 0\n    test_total = 0\n    for j in range(len(test_label)):\n        x = random.randint(0, len(test_label)-1)\n        pt.append(test_result[x])\n        plt.append(test_label[x])\n    for x, _ in enumerate(pt):\n        test_total += 1\n        test_correct += (pt[x] == plt[x])\n    acc.append(test_correct/test_total)\nacc = numpy.array(acc)\nmean, std = acc.mean(), acc.std(ddof=1)\nprint('(bootstrap 500) mean is ' + str(mean))\nconf_interval = scipy.stats.norm.interval(0.95, loc=mean, scale=std)\nprint('conf_interval ' + str(conf_interval))\nprecision, recall, thresholds = metrics.precision_recall_curve(\n    test_label, test_score)\nprint(precision)\nprint(recall)\nmatplotlib.pyplot.plot(recall, precision, lw=1)\nmatplotlib.pyplot.plot([0, 1], [0, 1], '--',\n                       color=(0.6, 0.6, 0.6), label=\"Luck\")\t\t\t\t # draw the diagonal\nmatplotlib.pyplot.xlim([-0.05, 1.05])\nmatplotlib.pyplot.ylim([-0.05, 1.05])\nmatplotlib.pyplot.xlabel(\"Recall Rate\")\nmatplotlib.pyplot.ylabel(\"Precision Rate\")\nmatplotlib.pyplot.savefig('pr_0.307.eps')\n# LR\nif DO_LR:\n    LR_acc = 0\n    LR_train_data = [[x] for x in train_score]\n    LR_train_label = [x for x in train_label]\n    if 'LR_C' not in locals().keys():\n        LR_C = 0\n        for iter in range(1, 2000):\n            LR = linear_model.LogisticRegression(C=0.0001*iter)\n\n            LR_score = cross_val_score(LR, LR_train_data, LR_train_label)\n            LR_score = list(LR_score)\n            LR_tmp = 0\n            for x in LR_score:\n                LR_tmp += x\n            LR_tmp /= 5\n            '''\n            LR.fit(LR_train_data, LR_train_label)\n            LR_result = list(LR.predict([[x] for x in train_score]))\n            right = 0\n            for i, _ in enumerate(LR_train_label):\n                right += (LR_result[i] == LR_train_label[i])\n            LR_tmp = right/len(LR_train_label)\n            '''\n            if LR_tmp > LR_acc:\n                LR_acc = LR_tmp\n                LR_C = iter*0.0001\n            if iter % 20 == 0:\n                print('LR', iter)\n    result.writelines(\n        '\\nLR: validation acc is {}, LR_C: {}\\n'.format(LR_acc, LR_C))\n    LR = linear_model.LogisticRegression(C=LR_C)\n    LR.fit(LR_train_data, LR_train_label)\n    LR_result = list(LR.predict([[x] for x in test_score]))\n    LR_probability = LR.predict_proba([[x] for x in test_score])\n    probability = []\n    for i in LR_probability:\n        probability.append(i[1])\n    right = 0\n    for i, _ in enumerate(test_label):\n        right += (LR_result[i] == test_label[i])\n    result.writelines('LR acc: {:.6f}\\n'.format(right/len(test_label)))\n    result.writelines('predict results:\\n'+str(LR_result) +\n                      '\\nwrong cases:(rank, score, label)\\n')\n    for i, _ in enumerate(test_label):\n        if test_label[i] != LR_result[i]:\n            result.writelines(str((i, test_score[i], test_label[i]))+', ')\n    paint_roc(test_label, probability, 'LR1.png')\n    paint_roc(test_label, test_score, 'LR2.png')\n    paint_roc(test_label, [random.random()\n                           for _ in probability], 'LR3.png')\n    paint_roc(test_label, LR_result, 'LR4.png')\n    print('intercept:', LR.intercept_, 'coef:', LR.coef_)\n    print('score:\\n', test_score)\n    print('LR result:\\n', probability)\n\n    result.writelines('\\n')\n\n# SVM-SVC\nif DO_SVC:\n    SVC_C = 0\n    SVC_acc = 0\n    SVC_train_data = [[x] for x in train_score]\n    SVC_train_label = [x for x in train_label]\n\n    for iter in range(1, 2000):\n        SVC = svm.SVC(C=0.001*iter)\n\n        SVC_score = cross_val_score(SVC, SVC_train_data, SVC_train_label)\n        SVC_score = list(SVC_score)\n        SVC_tmp = 0\n        for x in SVC_score:\n            SVC_tmp += x\n        SVC_tmp /= 5\n        '''\n        
SVC.fit(SVC_train_data, SVC_train_label)\n SVC_result = list(SVC.predict([[x] for x in train_score]))\n right = 0\n for i, _ in enumerate(SVC_train_label):\n right += (SVC_result[i] == SVC_train_label[i])\n SVC_tmp = right/len(SVC_train_label)\n '''\n if SVC_tmp >= SVC_acc:\n SVC_acc = SVC_tmp\n SVC_C = iter*0.001\n if iter % 20 == 0:\n print('SVC', iter)\n result.writelines(\n '\\nSVC: validation acc is {}, SVC_C: {}\\n'.format(SVC_acc, SVC_C))\n SVC = svm.SVC(C=1)\n SVC.fit(SVC_train_data, SVC_train_label)\n SVC_result = list(SVC.predict([[x] for x in test_score]))\n right = 0\n for i, _ in enumerate(test_label):\n right += (SVC_result[i] == test_label[i])\n result.writelines('SVC acc: {:.6f}\\n'.format(right/len(test_label)))\n result.writelines('predict results:\\n'+str(SVC_result) +\n '\\nwrong cases:(rank, score, label)\\n')\n for i, _ in enumerate(test_label):\n if test_label[i] != SVC_result[i]:\n result.writelines(str((i, test_score[i], test_label[i]))+', ')\n paint_roc(test_label, SVC_result, 'SVC.png')\n result.writelines('\\n')\n\n# KNN\nif DO_KNN:\n KNN_k = 0\n KNN_acc = 0\n KNN_train_data = [[x] for x in train_score]\n KNN_train_label = [x for x in train_label]\n\n for iter in range(1, 10):\n KNN = neighbors.KNeighborsClassifier(n_neighbors=iter)\n\n KNN_score = cross_val_score(KNN, KNN_train_data, KNN_train_label)\n KNN_score = list(KNN_score)\n KNN_tmp = 0\n for x in KNN_score:\n KNN_tmp += x\n KNN_tmp /= 5\n '''\n KNN.fit(KNN_train_data, KNN_train_label)\n KNN_result = list(KNN.predict([[x] for x in train_score]))\n right = 0\n for i, _ in enumerate(KNN_train_label):\n right += (KNN_result[i] == KNN_train_label[i])\n KNN_tmp = right/len(KNN_train_label)\n '''\n if KNN_tmp >= KNN_acc:\n KNN_acc = KNN_tmp\n KNN_k = iter\n result.writelines(\n '\\nKNN: validation acc is {}, KNN_k: {}\\n'.format(KNN_acc, KNN_k))\n KNN = neighbors.KNeighborsClassifier(n_neighbors=5)\n KNN.fit(KNN_train_data, KNN_train_label)\n KNN_result = list(KNN.predict([[x] for x in test_score]))\n right = 0\n for i, _ in enumerate(test_label):\n right += (KNN_result[i] == test_label[i])\n result.writelines('KNN acc: {:.6f}\\n'.format(right/len(test_label)))\n result.writelines('predict results:\\n'+str(KNN_result) +\n '\\nwrong cases:(rank, score, label)\\n')\n for i, _ in enumerate(test_label):\n if test_label[i] != KNN_result[i]:\n result.writelines(str((i, test_score[i], test_label[i]))+', ')\n paint_roc(test_label, KNN_result, 'KNN.png')\n result.writelines('\\n')\n\n\nresult.writelines('\\nothers\\n')\nresult.writelines('test_patients labels: \\n' + str(test_label)+'\\n')\nresult.writelines('test_patients names:\\n '+str(test_name)+'\\n')\nresult.writelines('test_patients scores:\\n ' + str(test_score)+'\\n')\nresult.close()\n\nwb = openpyxl.Workbook()\nwb.create_sheet('之前的测试数据(三月份)')\nsheet = wb.worksheets[-1]\nsheet.cell(1, 1, '88 patients, MSS=1, MSI=0')\nsheet.cell(1, 2, 'label')\nsheet.cell(1, 3, 'score')\nif DO_LR:\n sheet.cell(1, 4, 'LR predict')\nif DO_SVC:\n sheet.cell(1, 5, 'SVC predict')\nif DO_KNN:\n sheet.cell(1, 6, 'KNN predict')\nif DO_RAW:\n sheet.cell(1, 7, 'raw predict')\nfor i in range(len(test_score)):\n sheet.cell(i+2, 1, test_name[i])\n sheet.cell(i+2, 2, test_label[i])\n sheet.cell(i+2, 3, test_score[i])\n if DO_LR:\n sheet.cell(i+2, 4, LR_result[i])\n if DO_SVC:\n sheet.cell(i+2, 5, SVC_result[i])\n if DO_KNN:\n sheet.cell(i+2, 6, KNN_result[i])\n if DO_RAW:\n sheet.cell(i+2, 7, raw_result[i])\nwb.save('2021.3data.xlsx')\n\"\"\"\n\nnewfile = open('MSS.txt', 'r')\npatients = 
{}\nMSI_group = os.listdir('newdata/MSIMSS-1')\nMSS_group = os.listdir('newdata/MSIMSS-2')\nMSS_file = open('mss.txt', 'w')\nMSI_file = open('msi.txt', 'w')\nMSS_name = []\nMSI_name = []\nMSS_score = []\nMSI_score = []\nMSS_label = []\nMSI_label = []\nMSS_num = []\nMSI_num = []\nfor line in newfile.readlines():\n if line.split('/')[1] not in patients.keys():\n patients[line.split('/')[1]] = [float(line.split(' ')[1]) > 0.5, 1]\n else:\n patients[line.split('/')[1]][1] += 1\n patients[line.split('/')[1]][0] += float(line.split(' ')[1]) > 0.5\nfor k, v in patients.items():\n tmp = patients[k]\n patients[k] = [tmp[0]/tmp[1]]\n if k in MSI_group:\n MSI_num.append(tmp[1])\n patients[k].append(0)\n MSI_file.writelines(\n k+' '+str(patients[k][0])+' '+str(patients[k][1])+'\\n')\n MSI_name.append(k)\n MSI_score.append(patients[k][0])\n MSI_label.append(patients[k][1])\n elif k in MSS_group:\n MSS_num.append(tmp[1])\n patients[k].append(1)\n MSS_file.writelines(\n k+' '+str(patients[k][0])+' '+str(patients[k][1])+'\\n')\n MSS_name.append(k)\n MSS_score.append(patients[k][0])\n MSS_label.append(patients[k][1])\n else:\n print('exit')\n\nprint(patients)\nwb = openpyxl.Workbook()\n\nLR_C = 0.0699\nLR_train_data = [[x] for x in train_score]\nLR_train_label = [x for x in train_label]\nLR = linear_model.LogisticRegression(C=LR_C)\nLR.fit(LR_train_data, LR_train_label)\nLR_result = list(LR.predict([[x] for x in MSI_score]))\nright = 0\nfor i, _ in enumerate(MSI_label):\n right += (LR_result[i] == MSI_label[i])\nresult.writelines('MSI LR acc: {:.6f}\\n'.format(right/len(MSI_label)))\nresult.writelines('MSI predict results:\\n'+str(LR_result) +\n '\\nwrong cases:(rank, score, label)\\n')\nfor i, _ in enumerate(MSI_label):\n if MSI_label[i] != LR_result[i]:\n result.writelines(str((i, MSI_score[i], MSI_label[i]))+', ')\npaint_roc(MSI_label, LR_result, 'MSI LR.png')\nresult.writelines('\\n')\n\nwb.create_sheet('2021.6.MSIMSS-1')\nsheet = wb.worksheets[-1]\nsheet.cell(1, 1, 'patients: {}, MSS=1, MSI=0'.format(len(MSI_name)))\nsheet.cell(1, 2, 'label')\nsheet.cell(1, 3, 'score')\nsheet.cell(1, 4, 'LR predict')\nsheet.cell(1, 5, 'tile num')\nfor i in range(len(MSI_score)):\n sheet.cell(i+2, 1, MSI_name[i])\n sheet.cell(i+2, 2, MSI_label[i])\n sheet.cell(i+2, 3, MSI_score[i])\n sheet.cell(i+2, 4, LR_result[i])\n sheet.cell(i+2, 5, MSI_num[i])\n\nLR_train_data = [[x] for x in train_score]\nLR_train_label = [x for x in train_label]\nLR = linear_model.LogisticRegression(C=LR_C)\nLR.fit(LR_train_data, LR_train_label)\nLR_result = list(LR.predict([[x] for x in MSS_score]))\nright = 0\nfor i, _ in enumerate(MSS_label):\n right += (LR_result[i] == MSS_label[i])\nresult.writelines('MSS LR acc: {:.6f}\\n'.format(right/len(MSS_label)))\nresult.writelines('MSS predict results:\\n'+str(LR_result) +\n '\\nwrong cases:(rank, score, label)\\n')\nfor i, _ in enumerate(MSS_label):\n if MSS_label[i] != LR_result[i]:\n result.writelines(str((i, MSS_score[i], MSS_label[i]))+', ')\npaint_roc(MSS_label, LR_result, 'MSS LR.png')\nresult.writelines('\\n')\n\nwb.create_sheet('2021.6.MSIMSS-2')\nsheet = wb.worksheets[-1]\nsheet.cell(1, 1, 'patients: {}, MSS=1, MSI=0'.format(len(MSS_name)))\nsheet.cell(1, 2, 'label')\nsheet.cell(1, 3, 'score')\nsheet.cell(1, 4, 'LR predict')\nsheet.cell(1, 5, 'tile num')\nfor i in range(len(MSS_score)):\n sheet.cell(i+2, 1, MSS_name[i])\n sheet.cell(i+2, 2, MSS_label[i])\n sheet.cell(i+2, 3, MSS_score[i])\n sheet.cell(i+2, 4, LR_result[i])\n sheet.cell(i+2, 5, 
MSS_num[i])\nwb.save('2021.6data.xlsx')\n\"\"\"\n"
]
| [
[
"numpy.array",
"sklearn.metrics.precision_recall_curve",
"sklearn.neighbors.KNeighborsClassifier",
"scipy.stats.norm.interval",
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.auc",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.roc_curve"
]
]
|
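Note on the row above: getRatio.py estimates a 95% confidence interval for test accuracy via 500 bootstrap replicates and a normal approximation from scipy.stats.norm.interval. A compact, self-contained sketch of that pattern follows; the synthetic labels and the 20% corruption rate are invented stand-ins for the script's train.txt/test.txt inputs.

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=88)        # 88 patients, as in the row's output sheet
preds = labels.copy()
preds[rng.random(88) < 0.2] ^= 1            # corrupt ~20% of the predictions

accs = []
for _ in range(500):                        # 500 bootstrap replicates
    idx = rng.integers(0, len(labels), size=len(labels))  # resample with replacement
    accs.append(float((preds[idx] == labels[idx]).mean()))
accs = np.array(accs)
lo, hi = scipy.stats.norm.interval(0.95, loc=accs.mean(), scale=accs.std(ddof=1))
print(f"acc {accs.mean():.3f}, 95% CI [{lo:.3f}, {hi:.3f}]")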
glenrs/WishbuilderData | [
"424511250246fc56f24e2ae7b92330504b113eeb"
]
| [
"CompletedDatasets/GSE62944/GSE62944_Normal_TPM/parse.py"
]
| [
"import sys, gzip\nimport numpy as np\n\nPatientCancerType = sys.argv[1]\nNormalTPM = sys.argv[2]\ndataOutFile = sys.argv[3]\nmetadataOutFile = sys.argv[4]\nprint(metadataOutFile)\nnamesToAbbreviations = sys.argv[5]\n\n## Read the namesToAbbreviation\nabbvToNamesDict = {}\nwith open(namesToAbbreviations, 'r') as f:\n f.readline()\n for line in f :\n lineList = line.strip('\\n').split('\\t')\n abbvToNamesDict[lineList[2]] = lineList[1]\n\n# This code takes the new transposedNormalTPM and addes the PatientCancerType to the second column and writes it to the outFile data.tsv.gz\npatientIDToCancerDict = {}\nwith gzip.open(PatientCancerType, 'r') as f:\n for line in f:\n lineList= line.decode().strip('\\n').split('\\t')\n patientIDToCancerDict[lineList[0]] = lineList[1]\n\nwith gzip.open(NormalTPM, 'r') as iF:\n data = np.genfromtxt(iF,delimiter='\\t',dtype=str)\n with gzip.open(dataOutFile, 'w') as ofData:\n with gzip.open(metadataOutFile, 'w') as ofMeta:\n firstLine = data.T[0,:]\n ofMeta.write((\"SampleID\\tVariable\\tValue\\n\").encode())\n ofData.write((\"SampleID\\t\" + '\\t'.join(firstLine[1:]) + '\\n').encode())\n for lineList in data.T[1:,:]:\n ofMeta.write((lineList[0] + \"\\tCancer_Type\\t\" + abbvToNamesDict[patientIDToCancerDict[lineList[0]]] + \"\\n\").encode())\n ofData.write(('\\t'.join(lineList) + '\\n').encode())\n"
]
| [
[
"numpy.genfromtxt"
]
]
|
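Note on the row above: the parser reads a gzipped TSV matrix with numpy.genfromtxt, transposes it so each row is one sample, and re-emits gzip-compressed bytes. A toy round trip under those assumptions (the file names and values here are made up):

import gzip
import numpy as np

# Toy matrix: genes down the rows, samples across the columns.
with gzip.open("toy.tsv.gz", "wt") as f:
    f.write("gene\tS1\tS2\ng1\t3.2\t0.1\ng2\t5.0\t2.4\n")

with gzip.open("toy.tsv.gz", "r") as f:
    data = np.genfromtxt(f, delimiter="\t", dtype=str)

# Transpose so each row is one sample, then write gzip-compressed bytes,
# mirroring the parser's output pattern.
with gzip.open("toy_T.tsv.gz", "w") as out:
    for row in data.T:
        out.write(("\t".join(row) + "\n").encode())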
tarunchy/keras-molecules | [
"f56fbcb92db888a49e62d26144cbf9c47761bffd"
]
| [
"molecules/utils.py"
]
| [
"import gzip\nimport pandas\nimport h5py\nimport numpy as np\n\ndef one_hot_array(i, n):\n return list( map(int, [ix == i for ix in range(n)]))\n\ndef one_hot_index(vec, charset):\n charset = [str(c) for c in charset]\n return list( map(charset.index, vec))\n\ndef from_one_hot_array(vec):\n oh = np.where(vec == 1)\n if oh[0].shape == (0, ):\n return None\n return int(oh[0][0])\n\ndef decode_smiles_from_indexes(vec, charset):\n charset = [str(c) for c in charset]\n return \"\".join(map(lambda x: charset[x], vec)).strip()\n\ndef load_dataset(filename, split = True):\n h5f = h5py.File(filename, 'r')\n if split:\n data_train = h5f['data_train'][:]\n else:\n data_train = None\n data_test = h5f['data_test'][:]\n charset = h5f['charset'][:]\n charset = [str(c) for c in charset]\n h5f.close()\n if split:\n return (data_train, data_test, charset)\n else:\n return (data_test, charset)\n"
]
| [
[
"numpy.where"
]
]
|
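Note on the row above: utils.py encodes character sequences as one-hot matrices and decodes them back by locating the hot index with numpy.where. A tiny round trip over an invented SMILES charset, reusing the same helper logic:

import numpy as np

charset = ["C", "N", "O", "(", ")", "=", "1"]   # illustrative charset

def one_hot_array(i, n):
    return [int(ix == i) for ix in range(n)]

smiles = "C(=O)N"
indexes = [charset.index(ch) for ch in smiles]
onehot = np.array([one_hot_array(i, len(charset)) for i in indexes])

# Decode by finding the hot position in each row, as from_one_hot_array does.
decoded = "".join(charset[int(np.where(row == 1)[0][0])] for row in onehot)
assert decoded == smiles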
libcell/deconvBench | [
"a9a30f987c4a7b9f7ffca64d90add6d2be44f700"
]
| [
"inst/python/rpytools/Autogenges.py"
]
| [
"##--------------------------------------------------------------------##\n# Loading library\n##--------------------------------------------------------------------##\nimport numpy as np\nimport scanpy as sc\nimport scipy as sci\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport autogenes as ag\nfrom sklearn.svm import NuSVR\nimport pickle\n\n\n\n##--------------------------------------------------------------------##\n# Reading in the inputs\n##--------------------------------------------------------------------##\n#read single-cell data\n\n#read 100 synthetic bulks generated by summing single cells\n#the bulks are normalized to counts_per_cell_after=1e4\n#proportions are only required for validation\nadata = sc.read_h5ad(\"scRNA.h5ad\")\n\ndata_bulk_raw = pd.read_csv('Bulk_data.csv',delimiter=',', index_col=0)\n\n##--------------------------------------------------------------------##\n# Preprocessing single-cell data\n##--------------------------------------------------------------------##\n#normalizing and selecting 4000 hihgly variable genes for optimization\n#we use log normalized data for selecting hihgly variable genes and visualization\nadata_norm = sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4, copy=True) \n\n\nsc.pp.highly_variable_genes(adata_norm, flavor='cell_ranger',n_top_genes=4000)\n\n\nadata_proc = adata_norm[:, adata_norm.var[adata_norm.var['highly_variable']==True].index]\n\n\n\n###画PCA\nsc.pp.pca(adata_norm, n_comps=30, use_highly_variable=True, svd_solver='arpack')\n\n\n\n\n\n###画CEll cluster图\nadata_norm.obs['cells'] = [x.split('_', 1)[0] for x in adata_norm.obs_names]\n\n#filter cells in normalized data\nadata_proc = adata_proc[adata_norm.obs_names]\n\n\n\n\n#calculating the centroids of cell types\nclusters = pd.Series.unique(adata_norm.obs['cells'])\n\nsc_mean = pd.DataFrame(index=adata_proc.var_names, columns=clusters)\n\nfor cluster in clusters:\n cells = [x for x in adata_proc.obs_names if x.startswith(cluster)]\n sc_part = adata_proc[cells,:].X.T\n sc_mean[cluster] = pd.DataFrame(np.mean(sc_part,axis=1),index=adata_proc.var_names)\ncentroids_sc_hv = sc_mean\n\n\n####\n###\n###AutoGeneS\nag.init(centroids_sc_hv.T)\n\nag.optimize(ngen=5000,seed=0,nfeatures=400,mode='fixed',offspring_size=100,verbose=False)\n\nindex = ag.select(index=0)\n\ncentroids_sc_pareto = centroids_sc_hv[index]\n\ncoef_nusvr = ag.deconvolve(data_bulk_raw.T, model='nusvr')\ncoef_nnls = ag.deconvolve(data_bulk_raw.T, model='nnls')\n\nsave1 = pd.DataFrame(coef_nusvr) \nsave1.to_csv('/Users/touyasushishin/Desktop/Autogens_NNLS.csv',index=True,header=True) \n \nsave2 = pd.DataFrame(coef_nusvr) \nsave2.to_csv('/Users/touyasushishin/Desktop/Autogens_NuSVR.csv',index=True,header=True) \n \n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.mean",
"pandas.Series.unique"
]
]
|
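Note on the row above: before calling AutoGeneS, the script collapses the single-cell matrix into per-cell-type centroids by averaging all cells whose obs_names share a cluster prefix. A pandas-only sketch of that centroid step on fabricated counts (cluster label = prefix before '_', matching the script's naming convention):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
genes = [f"g{i}" for i in range(5)]
cells = ["Tcell_1", "Tcell_2", "Bcell_1", "Bcell_2"]
expr = pd.DataFrame(rng.poisson(3.0, (5, 4)), index=genes, columns=cells)

clusters = pd.Series.unique(pd.Series(c.split("_", 1)[0] for c in cells))
centroids = pd.DataFrame(index=genes, columns=clusters, dtype=float)
for cluster in clusters:
    members = [c for c in cells if c.startswith(cluster)]
    centroids[cluster] = np.mean(expr[members].values, axis=1)  # mean over cluster cells
print(centroids)

The genes-by-celltypes centroid table is what ag.init() receives (transposed), so the marker-gene search runs on cluster means rather than tens of thousands of individual cells.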
collector-m/ST3D | [
"720e04aa3dc4bb95ac336171b240b6c3130144e5"
]
| [
"pcdet/datasets/kitti/kitti_utils.py"
]
| [
"import numpy as np\nfrom ...utils import box_utils\n\nfrom ..dataset import DatasetTemplate as Dataset\n\n\ndef transform_annotations_to_kitti_format(annos, map_name_to_kitti=None, info_with_fakelidar=False, **kwargs):\n \"\"\"\n Args:\n annos:\n map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist)\n info_with_fakelidar:\n Returns:\n\n \"\"\"\n for anno in annos:\n if 'name' not in anno:\n anno['name'] = anno['gt_names']\n anno.pop('gt_names')\n\n for k in range(anno['name'].shape[0]):\n if anno['name'][k] in map_name_to_kitti:\n anno['name'][k] = map_name_to_kitti[anno['name'][k]]\n else:\n anno['name'][k] = 'Person_sitting'\n\n if 'boxes_lidar' in anno:\n gt_boxes_lidar = anno['boxes_lidar'].copy()\n elif 'gt_boxes_lidar' in anno:\n gt_boxes_lidar = anno['gt_boxes_lidar'].copy()\n else:\n gt_boxes_lidar = anno['gt_boxes'].copy()\n\n # filter by fov\n if kwargs.get('is_gt', None) and kwargs.get('GT_FILTER', None):\n if kwargs.get('FOV_FILTER', None):\n gt_boxes_lidar = filter_by_fov(anno, gt_boxes_lidar, kwargs)\n\n # filter by range\n if kwargs.get('GT_FILTER', None) and kwargs.get('RANGE_FILTER', None):\n point_cloud_range = kwargs['RANGE_FILTER']\n gt_boxes_lidar = filter_by_range(anno, gt_boxes_lidar, point_cloud_range, kwargs['is_gt'])\n\n if kwargs.get('GT_FILTER', None):\n anno['gt_boxes_lidar'] = gt_boxes_lidar\n\n anno['bbox'] = np.zeros((len(anno['name']), 4))\n anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]\n anno['truncated'] = np.zeros(len(anno['name']))\n anno['occluded'] = np.zeros(len(anno['name']))\n\n if len(gt_boxes_lidar) > 0:\n if info_with_fakelidar:\n gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)\n\n gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2\n anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))\n anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar\n anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar\n anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar\n dxdydz = gt_boxes_lidar[:, 3:6]\n anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw\n anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0\n anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']\n else:\n anno['location'] = anno['dimensions'] = np.zeros((0, 3))\n anno['rotation_y'] = anno['alpha'] = np.zeros(0)\n\n return annos\n\n\ndef filter_by_range(anno, gt_boxes_lidar, point_cloud_range, is_gt):\n mask = box_utils.mask_boxes_outside_range_numpy(\n gt_boxes_lidar, point_cloud_range, min_num_corners=1\n )\n gt_boxes_lidar = gt_boxes_lidar[mask]\n anno['name'] = anno['name'][mask]\n if not is_gt:\n anno['score'] = anno['score'][mask]\n anno['pred_labels'] = anno['pred_labels'][mask]\n\n return gt_boxes_lidar\n\n\ndef filter_by_fov(anno, gt_boxes_lidar, kwargs):\n fov_gt_flag = Dataset.extract_fov_gt(\n gt_boxes_lidar, kwargs['FOV_DEGREE'], kwargs['FOV_ANGLE']\n )\n gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]\n anno['name'] = anno['name'][fov_gt_flag]\n\n return gt_boxes_lidar\n"
]
| [
[
"numpy.arctan2",
"numpy.zeros"
]
]
|
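Note on the row above: transform_annotations_to_kitti_format maps LiDAR boxes into the KITTI camera convention — axes permuted with sign flips, (l, w, h) reordered to (l, h, w), and the observation angle alpha derived from rotation_y (the real code also first shifts z to the box bottom). The same arithmetic on a single toy box, with all values invented:

import numpy as np

# One toy LiDAR box: (x, y, z, dx, dy, dz, heading).
boxes = np.array([[10.0, -2.0, -0.8, 4.0, 1.8, 1.6, 0.3]])

location = np.zeros((boxes.shape[0], 3))
location[:, 0] = -boxes[:, 1]                 # cam x = -y_lidar
location[:, 1] = -boxes[:, 2]                 # cam y = -z_lidar
location[:, 2] = boxes[:, 0]                  # cam z =  x_lidar
dimensions = boxes[:, 3:6][:, [0, 2, 1]]      # (l, w, h) -> (l, h, w)
rotation_y = -boxes[:, 6] - np.pi / 2.0
alpha = -np.arctan2(-boxes[:, 1], boxes[:, 0]) + rotation_y
print(location, dimensions, rotation_y, alpha)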
randrew2636/Visualising_BZ | [
"f73016a3cef3b9d041d60d2484b346d8941e6f96"
]
| [
"BZ_ZONES_counting.py"
]
| [
"# Richard Andrew 2020\n# \nimport numpy as np\nimport pandas as pd\n\nimport time\nimport os\n#--------------------------------------------------------\n\n# Run f2py compiling the openMP fortran file into a python-callable library\n\nos.system(\"f2py3.7 -c -m ws bzones_lib_counting.f90 --f90flags=-fopenmp -lgomp\")\n\n# Import this library\n\nimport ws\n\n# NOTE 'ws.*.so' is the library and 'ws' is the import name\n\n#----------------------------------------------\n# Gvect generator function\n\ndef gvect(gs,gcut,n):\n # define spanning vectors\n g = np.zeros(((n+2)*(n+2)*(n+2)*8*8*8,3))\n l=0\n # Generate all G vectors in more or less order of increasing shell size within cutoff radius\n print ('Generating Gvect')\n for i in range(-n,n+1):\n for j in range(-n,n+1):\n for k in range(-n,n+1):\n if(i==0 and j==0 and k==0):\n continue\n g[l,:] = i*gs[0,:] + j*gs[1,:] + k*gs[2,:]\n gg = np.linalg.norm(g[l])**2\n if(gg-gcut > 0.0001):\n continue\n l=l + 1\n print ()\n print ('Number of vectors= ',l)\n # order G vectors into shells of increasing radius\n gg = np.zeros(l)\n for i in range(l):\n gg[i]=np.linalg.norm(g[i])**2\n idx = np.argsort(gg)\n gg=gg[idx]\n g=g[idx]\n g=g[0:l]\n return g\n#----------------------------------------------------------------------\n\nPI = np.pi\n#### Enter in parameters\n# Enter crystal system type\n\nprint ('Enter cystal system type:')\nprint ()\nprint ('1-CUBIC')\nprint ('2-TETRAGONAL')\nprint ('3-ORTHORHOMBIC')\nprint ('4-MONOCLINIC')\nprint ('5-TRICLINIC')\nprint ('6-HEXAGONAL')\nprint ('7-TRIGONAL')\nprint ()\ncrys = int(input('Enter (default 1): ') or '1')\n\n# Enter crystal type and associated parameters\n\nca=1.0\nba=1.0\nif(crys==1):\n print ('P-1 I-2 F-3')\n s = int(input('Enter: (default 3) ') or '3')\nelif(crys==2):\n print ('P-1 I-2')\n s = int(input('Enter: ') or '2')\n ca = float(input('enter c/a (default 0.5) ') or '0.5')\nelif(crys==3):\n print ('P-1 I-2 F-3 C-4')\n s = int(input('Enter: (default 3) ') or '3')\n ba = float(input('enter b/a (default 0.5) ') or '0.5')\n ca = float(input('enter c/a (default 0.5) ') or 'o.5')\nelif(crys==4):\n print ('P-1 I-2-NOT CHECKED')\n s = int(input('Enter: (default 1) ') or '1')\n ba = float(input('enter b/a (default 0.5) ') or '0.5')\n ca = float(input('enter c/a (default 0.5) ') or '0.5')\n gamma = float(input('enter gamma angle between a anc b vectors '))\n gamma=gamma*PI/180.0\nelif(crys==5):\n ba = float(input('enter b/a (default 0.5) ') or '0.5')\n ca = float(input('enter c/a (default 0.5) ') or '0.5')\n\n alpha = float(input('enter alpha angle between b anc c vectors (default 30) ') or '30')\n beta = float(input('enter beta angle between a anc c vectors (default 30) ') or '30')\n gamma = float(input('enter gamma angle between a anc b vectors (default 30) ') or '30')\n\n alpha=alpha*PI/180.0\n beta=beta*PI/180.0\n gamma=gamma*PI/180.0\nelse:\n ca = float(input('enter c/a (default 0.5) ') or '0.5')\n\nprint ()\n\n# RS are the spanning vectors\n\nRS = np.zeros(shape=(3,3))\n\n\n# Set the spanning vectors in units of 'a'\n\nif (crys==1 and s==1):\n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] = 0.0\n RS[1,1] = 1.0\n RS[1,2] = 0.0\n\t\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = 1.0\n\nelif (crys==1 and s==2):\n\n RS[0,0] =-0.5\n RS[0,1] = 0.5\n RS[0,2] = 0.5\n\n RS[1,0] = 0.5\n RS[1,1] =-0.5\n RS[1,2] = 0.5\n\n RS[2,0] = 0.5\n RS[2,1] = 0.5\n RS[2,2] =-0.5\n\nelif (crys==1 and s==3):\n\n RS[0,0] = 0.5\n RS[0,1] = 0.5\n RS[0,2] = 0.0\n\n RS[1,0] = 0.5\n RS[1,1] = 0.0\n RS[1,2] = 0.5\n\n RS[2,0] = 
0.0\n RS[2,1] = 0.5\n RS[2,2] = 0.5\n\nelif (crys==2 and s==1):\n \n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] = 0.0\n RS[1,1] = 1.0\n RS[1,2] = 0.0\n\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = ca\n\nelif (crys==2 and s==2):\n\n RS[0,0] =-0.5\n RS[0,1] = 0.5\n RS[0,2] = ca/2.0\n\n RS[1,0] = 0.5\n RS[1,1] =-0.5\n RS[1,2] = ca/2.0\n\n RS[2,0] = 0.5\n RS[2,1] = 0.5\n RS[2,2] =-ca/2.0\n\nelif (crys==3 and s==1):\n\n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] = 0.0\n RS[1,1] = ba\n RS[1,2] = 0.0\n\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = ca\n\nelif (crys==3 and s==2):\n\n RS[0,0] =-0.5\n RS[0,1] = 0.5*ba\n RS[0,2] = 0.5*ca\n\n RS[1,0] = 0.5\n RS[1,1] =-0.5*ba\n RS[1,2] = 0.5*ca\n\n RS[2,0] = 0.5\n RS[2,1] = 0.5*ba\n RS[2,2] =-0.5*ca\n\nelif (crys==3 and s==3):\n\n RS[0,0] = 0.5\n RS[0,1] = 0.5*ba\n RS[0,2] = 0.0\n\n RS[1,0] = 0.5\n RS[1,1] = 0.0\n RS[1,2] = 0.5*ca\n\n RS[2,0] = 0.0\n RS[2,1] = 0.5*ba\n RS[2,2] = 0.5*ca\n\nelif (crys==3 and s==4):\n\n RS[0,0] = 0.5\n RS[0,1] = 0.5*ba\n RS[0,2] = 0.0\n\n RS[1,0] =-0.5\n RS[1,1] = 0.5*ba\n RS[1,2] = 0.0\n\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = ca\n\nelif(crys==4 and s==1):\n\n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] = ba*np.cos(gamma)\n RS[1,1] = ba*np.sin(gamma)\n RS[1,2] = 0.0\n\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = ca\n\nelif(crys==4 and s==2):# NEEDS TO BE checked\n\n RS[0,0] = 0.5\n RS[0,1] = 0.0\n RS[0,2] = -ca/2.0\n\n RS[1,0] = ba*np.cos(gamma)\n RS[1,1] = ba*np.sin(gamma)\n RS[1,2] = 0.0\n\n RS[2,0] = 0.5\n RS[2,1] = 0.0\n RS[2,2] = ca/2.0\n\nelif (crys==5):\n\n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] = ba*np.cos(gamma)\n RS[1,1] = ba*np.sin(gamma)\n RS[1,2] = 0.0\n\n RS[2,0] = ca*np.cos(beta)\n RS[2,1] = ca*(np.cos(alpha)-np.cos(beta)*np.cos(gamma))/np.sin(gamma)\n RS[2,2] = ca*np.sqrt(1.0+2.0*np.cos(alpha)*np.cos(beta)*np.cos(gamma)-np.cos(alpha)**2-np.cos(beta)**2-np.cos(gamma)**2)/np.sin(gamma)\n\nelse:\n RS[0,0] = 1.0\n RS[0,1] = 0.0\n RS[0,2] = 0.0\n\n RS[1,0] =-0.5\n RS[1,1] = np.sqrt(3.0)/2.0\n RS[1,2] = 0.0\n\n RS[2,0] = 0.0\n RS[2,1] = 0.0\n RS[2,2] = ca\n\nprint\nprint ('r1= ',RS[0,0], RS[0,1], RS[0,2])\nprint ('r2= ',RS[1,0], RS[1,1], RS[1,2])\nprint ('r3= ',RS[2,0], RS[2,1], RS[2,2])\nprint\n\n# Generate reciprocal spanning vectors in units of 2pi/a\n\nGS = np.zeros(shape=(3,3))\n\nV = np.abs(RS[0,0]*(RS[1,1]*RS[2,2] - RS[2,1]*RS[1,2])+RS[0,1]*(RS[2,0]*RS[1,2] - RS[1,0]*RS[2,2]) + RS[0,2]*(RS[1,0]*RS[2,1] - RS[2,0]*RS[1,1]))\n\nprint ('Unit cell volume= ',V)\n\nGS[0,0] = (RS[0,1]*RS[1,2] - RS[1,1]*RS[0,2])/V\nGS[0,1] = (RS[1,0]*RS[0,2] - RS[1,2]*RS[0,0])/V\nGS[0,2] = (RS[0,0]*RS[1,1] - RS[1,0]*RS[0,1])/V\n\nGS[1,0] = (RS[1,1]*RS[2,2] - RS[2,1]*RS[1,2])/V\nGS[1,1] = (RS[2,0]*RS[1,2] - RS[1,0]*RS[2,2])/V\nGS[1,2] = (RS[1,0]*RS[2,1] - RS[2,0]*RS[1,1])/V\n\nGS[2,0] = (RS[2,1]*RS[0,2] - RS[0,1]*RS[2,2])/V\nGS[2,1] = (RS[0,0]*RS[2,2] - RS[2,0]*RS[0,2])/V\nGS[2,2] = (RS[2,0]*RS[0,1] - RS[0,0]*RS[2,1])/V\nprint ()\nprint ('b1= ',GS[0,0], GS[0,1], GS[0,2])\nprint ('b2= ',GS[1,0], GS[1,1], GS[1,2])\nprint ('b3= ',GS[2,0], GS[2,1], GS[2,2])\nprint ()\n\n# set cutoff radius squared and generate Gvect\n\nGC = int(input('Enter Gcut^2 (default 25) ') or '25')\n\n# define reciprocal lattice spanning vectors\n\nt1=time.time()\n\n# estimate number of first spanning vector from gamma to cutoff radius\n# and allocate and initialize G vectors:generate ordered G vectors\n\nGGS1 = GS[0,0]**2 + GS[0,1]**2 + GS[0,2]**2\nGGS2 = GS[1,0]**2 + GS[1,1]**2 + GS[1,2]**2\nGGS3 = 
GS[2,0]**2 + GS[2,1]**2 + GS[2,2]**2\nGGS = min(GGS1,GGS2,GGS3)\n\nnr=int(np.sqrt(GC/GGS)) + 1\n\nG = gvect(GS,GC,nr) # generate G vector with length squared less than Gcut^2\n\nll=len(G)\nprint(\"length G \",ll)\n\n# Define GRID based on parallel-piped based on b1,b2,b3\n\nm1 = int(input('enter how many multiples of G1 (default 2) ') or '2')\nm2 = int(input('enter how many multiples of G2 (default 2) ') or '2')\nm3 = int(input('enter how many multiples of G3 (default 2) ') or '2')\nN1 = int(input('Enter number of grid points along 1 direction (default 300) ') or '300')\nN2 = int(input('Enter number of grid points along 2 direction (default 300) ') or '300')\nN3 = int(input('Enter number of grid points along 3 direction (default 300) ') or '300')\n\n# choose bz\n\nBZ = int(input('Enter BZ (default 16) ') or '16') \n\n# open files to store BZ data\n\nif (os.path.exists('BZ_ZONE.dat')):\n os.system(\"rm -f BZ_ZONE.dat\")\n\n# call fortran procedure through ws library to create k-points within bz\n# Results saved to a datafile\n\nws.gen_kp(m1,m2,m3,N1,N2,N3,BZ,GS,G,ll)\n\nprint ()\nt2=time.time()\nprint ('time: ','%E' %(t2-t1), 'sec')\n\n# read k-points into a DataFrame\n\nout_df = pd.read_csv(\"./BZ_ZONE.dat\",header=None,delim_whitespace=True)\nos.system(\"rm -f BZ_ZONE.dat\") # delete k-point datafile\nprint('file read')\nout_df.columns = ['x','y','z','radius','BZ']\nprint(out_df.head())\n\nprint(out_df.describe())\n\n# import visualisation library\n\nfrom mayavi import mlab\n\nmlab.figure(figure=None, bgcolor=(0.7,0.7,0.7), fgcolor=None, engine=None, size=(4000, 3500))\n\nx = out_df[['x']].values\ny = out_df[['y']].values\nz = out_df[['z']].values\nrad = out_df[['radius']].values\n\n# create scatter pipeline with x,y,z and radius to define depth colors\n\nsrc = mlab.pipeline.scalar_scatter(x, y, z, rad)\n\n# plot points in a mayavi sceen window\n\n#pts = mlab.pipeline.glyph(src, scale_mode='none',scale_factor=.1,colormap=\"winter\")\npts = mlab.pipeline.glyph(src, scale_mode='none',scale_factor=.1,colormap=\"gray\")\n\nmlab.show()"
]
| [
[
"numpy.sin",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.abs",
"numpy.sqrt",
"numpy.argsort",
"numpy.cos",
"pandas.read_csv"
]
]
|
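Note on the row above: the gvect helper enumerates integer combinations of the reciprocal spanning vectors, keeps those inside a squared cutoff, and orders them into shells of increasing |G|^2 with numpy.argsort. A stripped-down, pure-NumPy version for a simple-cubic lattice (the cutoff and search range are illustrative):

import numpy as np

gs = np.eye(3)      # simple-cubic reciprocal spanning vectors (units of 2*pi/a)
gcut = 4.0          # squared cutoff radius
n = 3               # integer search range along each spanning vector

vecs = []
for i in range(-n, n + 1):
    for j in range(-n, n + 1):
        for k in range(-n, n + 1):
            if i == 0 and j == 0 and k == 0:
                continue
            g = i * gs[0] + j * gs[1] + k * gs[2]
            if np.linalg.norm(g) ** 2 <= gcut + 1e-4:
                vecs.append(g)

g = np.array(vecs)
gg = np.linalg.norm(g, axis=1) ** 2
g = g[np.argsort(gg)]               # order the vectors into shells of |G|^2
print(len(g), g[:6])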
deepchatterjeevns/Monk_Object_Detection | [
"861c6035e975ecdf3ea07273f7479dbf60fbf9b2"
]
| [
"8_pytorch_rfbnet/lib/train_detector.py"
]
| [
"import sys\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nimport torch.nn.init as init\nimport argparse\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nfrom data import VOCroot, COCOroot, VOC_300, VOC_512, COCO_300, COCO_512, COCO_mobile_300, AnnotationTransform, COCODetection, VOCDetection, detection_collate, BaseTransform, preproc\nfrom layers.modules import MultiBoxLoss\nfrom layers.functions import PriorBox\nimport time\n\nclass Detector():\n '''\n Class to train a detector\n\n Args:\n verbose (int): Set verbosity levels\n 0 - Print Nothing\n 1 - Print desired details\n '''\n def __init__(self, verbose=1):\n self.system_dict = {};\n self.system_dict[\"verbose\"] = verbose;\n self.system_dict[\"local\"] = {};\n self.system_dict[\"dataset\"] = {};\n self.system_dict[\"dataset\"][\"train\"] = {};\n self.system_dict[\"dataset\"][\"val\"] = {};\n self.system_dict[\"dataset\"][\"val\"][\"status\"] = False;\n\n self.system_dict[\"params\"] = {};\n\n self.system_dict[\"params\"][\"version\"] = \"RFB_vgg\"; #RFB_E_vgg or RFB_mobile version\n self.system_dict[\"params\"][\"basenet\"] = \"weights/vgg16_reducedfc.pth\";\n self.system_dict[\"params\"][\"cuda\"] = True;\n self.system_dict[\"params\"][\"ngpu\"] = 1;\n\n self.system_dict[\"params\"][\"dataset\"] = \"COCO\";\n self.system_dict[\"params\"][\"num_workers\"] = 3;\n self.system_dict[\"params\"][\"size\"] = 512; #300;\n self.system_dict[\"params\"][\"batch_size\"] = 4;\n\n self.system_dict[\"params\"][\"jaccard_threshold\"] = 0.5;\n self.system_dict[\"params\"][\"lr\"] = 0.0001;\n self.system_dict[\"params\"][\"momentum\"] = 0.9;\n self.system_dict[\"params\"][\"weight_decay\"] = 0.0005;\n self.system_dict[\"params\"][\"gamma\"] = 0.1;\n\n self.system_dict[\"params\"][\"resume_epoch\"] = 0\n self.system_dict[\"params\"][\"resume_net\"] = None;\n \n self.system_dict[\"params\"][\"max_epoch\"] = 200;\n self.system_dict[\"params\"][\"log_iters\"] = True;\n self.system_dict[\"params\"][\"save_folder\"] = \"weights/\";\n\n\n def Train_Dataset(self, root_dir, coco_dir, set_dir, batch_size=4, image_size=512, num_workers=3):\n '''\n User function: Set training dataset parameters\n\n Dataset Directory Structure\n\n root_dir\n |\n |------coco_dir \n | |\n | |----<set_dir>\n | |\n | |---------img1.jpg\n | |---------img2.jpg\n | |---------..........(and so on) \n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_<set_dir>.json\n | |--------------------classes.txt\n \n \n - instances_<set_dir>.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n\n For TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"kangaroo\";\n - set_dir = \"Images\";\n \n\n Note: Annotation file name too coincides against the set_dir\n\n Args:\n root_dir (str): Path to root directory containing coco_dir\n coco_dir (str): Name of coco_dir containing image folder and annotation folder\n set_dir (str): Name of folder containing all training images\n batch_size (int): Mini batch sampling size for training epochs\n image_size (int): Either of [512, 300]\n num_workers (int): Number of parallel processors for data loader \n\n Returns:\n None\n '''\n self.system_dict[\"dataset\"][\"train\"][\"root_dir\"] = root_dir;\n self.system_dict[\"dataset\"][\"train\"][\"coco_dir\"] = coco_dir;\n self.system_dict[\"dataset\"][\"train\"][\"set_dir\"] = set_dir;\n\n 
self.system_dict[\"params\"][\"batch_size\"] = batch_size;\n self.system_dict[\"params\"][\"size\"] = image_size;\n self.system_dict[\"params\"][\"num_workers\"] = num_workers;\n\n\n def Val_Dataset(self, root_dir, coco_dir, set_dir):\n '''\n User function: Set training dataset parameters\n\n Dataset Directory Structure\n\n root_dir\n |\n |------coco_dir \n | |\n | |----<set_dir>\n | |\n | |---------img1.jpg\n | |---------img2.jpg\n | |---------..........(and so on) \n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_<set_dir>.json\n | |--------------------classes.txt\n \n \n - instances_<set_dir>.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n\n For TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"kangaroo\";\n - set_dir = \"Images\";\n \n\n Note: Annotation file name too coincides against the set_dir\n\n Args:\n root_dir (str): Path to root directory containing coco_dir\n coco_dir (str): Name of coco_dir containing image folder and annotation folder\n set_dir (str): Name of folder containing all training images\n\n Returns:\n None\n '''\n self.system_dict[\"dataset\"][\"val\"][\"status\"] = True;\n self.system_dict[\"dataset\"][\"val\"][\"root_dir\"] = root_dir;\n self.system_dict[\"dataset\"][\"val\"][\"coco_dir\"] = coco_dir;\n self.system_dict[\"dataset\"][\"val\"][\"set_dir\"] = set_dir; \n \n\n def Model(self, model_name=\"vgg\", use_gpu=True, ngpu=1):\n '''\n User function: Set Model parameters\n\n Available Models\n vgg\n e_vgg\n mobilenet\n\n Args:\n model_name (str): Select model from available models\n use_gpu (bool): If True, model is loaded on GPU else cpu\n ngpu (int): Number of GPUs to use in parallel\n\n Returns:\n None\n '''\n if(not os.path.isdir(\"weights/\")):\n cmd1 = \"cp \" + os.path.dirname(os.path.realpath(__file__)) + \"/download.sh \" + os.getcwd() + \"/.\";\n os.system(cmd1);\n os.system(\"chmod +x download.sh\");\n os.system(\"./download.sh\");\n if(model_name == \"vgg\"):\n self.system_dict[\"params\"][\"version\"] = \"RFB_vgg\";\n self.system_dict[\"params\"][\"basenet\"] = \"weights/vgg16_reducedfc.pth\";\n elif(model_name == \"e_vgg\"):\n self.system_dict[\"params\"][\"version\"] = \"RFB_E_vgg\";\n self.system_dict[\"params\"][\"basenet\"] = \"weights/vgg16_reducedfc.pth\";\n elif(model_name == \"mobilenet\"):\n self.system_dict[\"params\"][\"basenet\"] = \"weights/mobilenet_feature.pth\";\n self.system_dict[\"params\"][\"version\"] = \"RFB_mobile\";\n\n self.system_dict[\"params\"][\"cuda\"] = use_gpu;\n self.system_dict[\"params\"][\"ngpu\"] = ngpu;\n\n\n def Set_HyperParams(self, lr=0.0001, momentum=0.9, weight_decay=0.0005, gamma=0.1, jaccard_threshold=0.5):\n '''\n User function: Set hyper parameters\n\n Args:\n lr (float): Initial learning rate for training\n momentum (float): Momentum value for optimizer\n weight_decay (float): Decay term for weights durng training for better regularization\n gamma (float): Multiplicative factor for learning rate \n jaccard_threshold (float): Limit nms thresholding \n print_interval (int): Post every specified iteration the training losses and accuracies will be printed\n\n Returns:\n None\n '''\n self.system_dict[\"params\"][\"jaccard_threshold\"] = jaccard_threshold;\n self.system_dict[\"params\"][\"lr\"] = lr;\n self.system_dict[\"params\"][\"momentum\"] = momentum;\n self.system_dict[\"params\"][\"weight_decay\"] = weight_decay;\n self.system_dict[\"params\"][\"gamma\"] = gamma;\n\n\n\n def Train(self, epochs=200, 
log_iters=True, output_weights_dir=\"weights\", saved_epoch_interval=10):\n        '''\n        User function: Start training\n\n        Args:\n            epochs (int): Number of epochs to train for\n            log_iters (bool): If True, logs will be saved\n            output_weights_dir (str): Folder path to save trained weights\n            saved_epoch_interval (int): Save intermediate weights after every \"saved_epoch_interval\" number of epochs\n\n        Returns:\n            None\n        '''\n        self.system_dict[\"params\"][\"max_epoch\"] = epochs;\n        self.system_dict[\"params\"][\"log_iters\"] = log_iters;\n        self.system_dict[\"params\"][\"save_folder\"] = output_weights_dir;\n\n        if not os.path.exists(self.system_dict[\"params\"][\"save_folder\"]):\n            os.mkdir(self.system_dict[\"params\"][\"save_folder\"])\n\n        if(self.system_dict[\"params\"][\"size\"] == 300):\n            cfg = COCO_300;\n        else:\n            cfg = COCO_512;\n\n        if self.system_dict[\"params\"][\"version\"] == 'RFB_vgg':\n            from models.RFB_Net_vgg import build_net\n        elif self.system_dict[\"params\"][\"version\"] == 'RFB_E_vgg':\n            from models.RFB_Net_E_vgg import build_net\n        elif self.system_dict[\"params\"][\"version\"] == 'RFB_mobile':\n            from models.RFB_Net_mobile import build_net\n            cfg = COCO_mobile_300\n        else:\n            print('Unknown version!')\n\n\n        \n        img_dim = (300,512)[self.system_dict[\"params\"][\"size\"]==512]\n        rgb_means = ((104, 117, 123),(103.94,116.78,123.68))[self.system_dict[\"params\"][\"version\"] == 'RFB_mobile']\n        p = (0.6,0.2)[self.system_dict[\"params\"][\"version\"] == 'RFB_mobile']\n        \n        f = open(self.system_dict[\"dataset\"][\"train\"][\"root_dir\"] + \"/\" + \n                 self.system_dict[\"dataset\"][\"train\"][\"coco_dir\"] + \"/annotations/classes.txt\", 'r');\n        lines = f.readlines();\n        if(lines[-1] == \"\"):\n            num_classes = len(lines) - 1;\n        else:\n            num_classes = len(lines) + 1;\n\n        \n        batch_size = self.system_dict[\"params\"][\"batch_size\"]\n        weight_decay = self.system_dict[\"params\"][\"weight_decay\"]\n        gamma = self.system_dict[\"params\"][\"gamma\"]\n        momentum = self.system_dict[\"params\"][\"momentum\"]\n\n        self.system_dict[\"local\"][\"net\"] = build_net('train', img_dim, num_classes)\n\n        if self.system_dict[\"params\"][\"resume_net\"] == None:\n            base_weights = torch.load(self.system_dict[\"params\"][\"basenet\"])\n            print('Loading base network...')\n            self.system_dict[\"local\"][\"net\"].base.load_state_dict(base_weights)\n\n            def xavier(param):\n                init.xavier_uniform(param)\n\n            def weights_init(m):\n                for key in m.state_dict():\n                    if key.split('.')[-1] == 'weight':\n                        if 'conv' in key:\n                            init.kaiming_normal_(m.state_dict()[key], mode='fan_out')\n                        if 'bn' in key:\n                            m.state_dict()[key][...] = 1\n                    elif key.split('.')[-1] == 'bias':\n                        m.state_dict()[key][...] 
= 0\n\n print('Initializing weights...')\n # initialize newly added layers' weights with kaiming_normal method\n self.system_dict[\"local\"][\"net\"].extras.apply(weights_init)\n self.system_dict[\"local\"][\"net\"].loc.apply(weights_init)\n self.system_dict[\"local\"][\"net\"].conf.apply(weights_init)\n self.system_dict[\"local\"][\"net\"].Norm.apply(weights_init)\n if self.system_dict[\"params\"][\"version\"] == 'RFB_E_vgg':\n self.system_dict[\"local\"][\"net\"].reduce.apply(weights_init)\n self.system_dict[\"local\"][\"net\"].up_reduce.apply(weights_init)\n\n else:\n # load resume network\n print('Loading resume network...')\n state_dict = torch.load(self.system_dict[\"params\"][\"resume_net\"])\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n head = k[:7]\n if head == 'module.':\n name = k[7:] # remove `module.`\n else:\n name = k\n new_state_dict[name] = v\n self.system_dict[\"local\"][\"net\"].load_state_dict(new_state_dict)\n\n\n if self.system_dict[\"params\"][\"ngpu\"] > 1:\n self.system_dict[\"local\"][\"net\"] = torch.nn.DataParallel(self.system_dict[\"local\"][\"net\"], device_ids=list(range(self.system_dict[\"params\"][\"ngpu\"])))\n\n if self.system_dict[\"params\"][\"cuda\"]:\n self.system_dict[\"local\"][\"net\"].cuda()\n cudnn.benchmark = True\n\n \n optimizer = optim.SGD(self.system_dict[\"local\"][\"net\"].parameters(), lr=self.system_dict[\"params\"][\"lr\"],\n momentum=self.system_dict[\"params\"][\"momentum\"], weight_decay=self.system_dict[\"params\"][\"weight_decay\"])\n #optimizer = optim.RMSprop(self.system_dict[\"local\"][\"net\"].parameters(), lr=self.system_dict[\"params\"][\"lr\"], alpha = 0.9, eps=1e-08,\n # momentum=self.system_dict[\"params\"][\"momentum\"], weight_decay=self.system_dict[\"params\"][\"weight_decay\"])\n\n criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)\n priorbox = PriorBox(cfg)\n with torch.no_grad():\n priors = priorbox.forward()\n if self.system_dict[\"params\"][\"cuda\"]:\n priors = priors.cuda()\n\n\n self.system_dict[\"local\"][\"net\"].train()\n # loss counters\n loc_loss = 0 # epoch\n conf_loss = 0\n epoch = 0 + self.system_dict[\"params\"][\"resume_epoch\"]\n print('Loading Dataset...')\n\n if(os.path.isdir(\"coco_cache\")):\n os.system(\"rm -r coco_cache\")\n\n dataset = COCODetection(self.system_dict[\"dataset\"][\"train\"][\"root_dir\"], \n self.system_dict[\"dataset\"][\"train\"][\"coco_dir\"], \n self.system_dict[\"dataset\"][\"train\"][\"set_dir\"], \n preproc(img_dim, rgb_means, p))\n\n\n epoch_size = len(dataset) // self.system_dict[\"params\"][\"batch_size\"]\n max_iter = self.system_dict[\"params\"][\"max_epoch\"] * epoch_size\n\n stepvalues = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)\n print('Training', self.system_dict[\"params\"][\"version\"], 'on', dataset.name)\n step_index = 0\n\n if self.system_dict[\"params\"][\"resume_epoch\"] > 0:\n start_iter = self.system_dict[\"params\"][\"resume_epoch\"] * epoch_size\n else:\n start_iter = 0\n\n lr = self.system_dict[\"params\"][\"lr\"]\n\n\n for iteration in range(start_iter, max_iter):\n if iteration % epoch_size == 0:\n # create batch iterator\n batch_iterator = iter(data.DataLoader(dataset, batch_size,\n shuffle=True, num_workers=self.system_dict[\"params\"][\"num_workers\"], \n collate_fn=detection_collate))\n loc_loss = 0\n conf_loss = 0\n \n torch.save(self.system_dict[\"local\"][\"net\"].state_dict(), 
self.system_dict[\"params\"][\"save_folder\"] + \"/\" + self.system_dict[\"params\"][\"version\"]+'_'+\n self.system_dict[\"params\"][\"dataset\"] + '_epoches_'+\n 'intermediate' + '.pth')\n epoch += 1\n\n load_t0 = time.time()\n if iteration in stepvalues:\n step_index += 1\n lr = self.adjust_learning_rate(optimizer, self.system_dict[\"params\"][\"gamma\"], epoch, step_index, iteration, epoch_size)\n\n\n # load train data\n images, targets = next(batch_iterator)\n\n #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))\n\n if self.system_dict[\"params\"][\"cuda\"]:\n images = Variable(images.cuda())\n targets = [Variable(anno.cuda()) for anno in targets]\n else:\n images = Variable(images)\n targets = [Variable(anno) for anno in targets]\n # forward\n t0 = time.time()\n out = self.system_dict[\"local\"][\"net\"](images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, priors, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n t1 = time.time()\n loc_loss += loss_l.item()\n conf_loss += loss_c.item()\n load_t1 = time.time()\n if iteration % saved_epoch_interval == 0:\n print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)\n + '|| Current iter ' +\n repr(iteration) + '|| Total iter ' + repr(max_iter) + \n ' || L: %.4f C: %.4f||' % (\n loss_l.item(),loss_c.item()) + \n 'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))\n\n torch.save(self.system_dict[\"local\"][\"net\"].state_dict(), self.system_dict[\"params\"][\"save_folder\"] + \"/\" +\n 'Final_' + self.system_dict[\"params\"][\"version\"] +'_' + self.system_dict[\"params\"][\"dataset\"] + '.pth')\n\n \n\n\n\n def adjust_learning_rate(self, optimizer, gamma, epoch, step_index, iteration, epoch_size):\n '''\n Internal function: Adjust learning rates during training\n\n Args:\n optimizer (pytorch optimizer): Optimizer being used\n gamma (float): Multiplicative factor for learning rate \n epoch (int): Current epoch\n step_index(int): Step index for scheduling learning rate\n iteration (int): Current iteration\n epoch_size (int): Total number of epochs\n\n Returns:\n None\n '''\n if epoch < 6:\n lr = 1e-6 + (self.system_dict[\"params\"][\"lr\"]-1e-6) * iteration / (epoch_size * 5) \n else:\n lr = self.system_dict[\"params\"][\"lr\"] * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr"
]
| [
[
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
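Note on the row above: adjust_learning_rate implements a linear warm-up from 1e-6 over the first five epochs, then multiplicative step decay by gamma at preset iteration milestones. A self-contained sketch of just the schedule (the constants mirror the class defaults; the epoch_size and milestones below are invented for the printout):

base_lr, gamma, warmup_epochs = 1e-4, 0.1, 5   # class defaults: lr=0.0001, gamma=0.1

def lr_at(epoch, step_index, iteration, epoch_size):
    # Linear warm-up from 1e-6 during the first five epochs, then step decay.
    if epoch < warmup_epochs + 1:
        return 1e-6 + (base_lr - 1e-6) * iteration / (epoch_size * warmup_epochs)
    return base_lr * (gamma ** step_index)

epoch_size = 100
milestones = (9000, 12000, 14000)              # stepvalues scaled by epoch_size
for iteration in (0, 250, 500, 9000, 12000):
    epoch = iteration // epoch_size
    step_index = sum(iteration >= m for m in milestones)
    print(iteration, f"{lr_at(epoch, step_index, iteration, epoch_size):.2e}")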
lquirosd/Order_Relation_Operator | [
"28d7cf9f691bc0f98a18bf37dc673bb08a7c2a6f"
]
| [
"src/text_line_dataset.py"
]
| [
"import os\nimport glob\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport cv2\n\nfrom xmlPAGE import pageData\nfrom utils import mkdir, files_exist\n\ndef region_features_from_xml(xml_file, categories):\n \"\"\"\n \"\"\"\n page = pageData(xml_file)\n page.parse()\n img_size = page.get_size()\n rtxt = page.get_sorted_child('TextRegion')\n ttxt = page.get_sorted_child('TableRegion')\n regions = []\n if rtxt is not None:\n regions.extend(rtxt)\n if ttxt is not None:\n regions.extend(ttxt)\n x=[]\n sorted_regions = []\n if len(regions) > 0:\n for region in regions:\n data = {}\n data[\"id\"] = page.get_id(region)\n data[\"parent\"] = page.name\n region_type = page.get_region_type(region)\n if region_type == None:\n region_tag = page.get_tag(region)\n if region_tag in categories:\n region_type = region_tag\n else:\n print (\"Region type undefined for region {}, this region will be ignored.\".format(data[\"id\"]))\n continue\n #--- features: -region type one hot encoding + fst 3 spatial moments + fst 3 central moments\n cl = torch.zeros(len(categories), dtype=torch.float)\n cl[categories.index(region_type)] = 1\n coords = page.get_coords(region) / img_size\n #coords = page.get_coords(region)\n minx = coords[:,0].min()\n maxx = coords[:,0].max()\n miny = coords[:,1].min()\n maxy = coords[:,1].max()\n m = cv2.moments(coords)\n #--- \n data[\"features\"] = torch.cat(\n (\n cl, # one hot encoded region type\n torch.tensor(\n #[m[\"m00\"], m[\"m10\"]/m[\"m00\"], m['m01']/m[\"m00\"]], dtype=torch.float\n [m[\"m00\"], m[\"m10\"]/m[\"m00\"], m['m01']/m[\"m00\"]], dtype=torch.float\n ), # spatial moments\n torch.tensor(\n #[m['mu11'], m['mu20'], m['m02']], dtype=torch.float\n [minx, maxx, miny, maxy], dtype=torch.float\n ), # central moments\n )\n )\n x.append(data)\n sorted_regions.append(data[\"id\"])\n if len(sorted_regions) == 0:\n return None\n else:\n return (x, sorted_regions, page.name)\n\n \n \n\ndef text_line_features_from_xml(xml_file, categories, hier=False):\n \"\"\"\n \"\"\"\n page = pageData(xml_file)\n page.parse()\n img_size = page.get_size()\n text_regions = page.get_sorted_child(\"TextRegion\")\n x = []\n sorted_lines = []\n l0_sorted_lines = []\n l1_sorted_lines = {}\n\n if text_regions != None:\n for region in text_regions:\n rid = page.get_id(region)\n l1_sorted_lines[rid+\"_\"+page.name] = []\n text_lines = page.get_sorted_child(\"TextLine\", region)\n if text_lines != None:\n for line in text_lines:\n data = {}\n cl = torch.zeros(len(categories), dtype=torch.float)\n line_id = page.get_id(line)\n data[\"id\"] = line_id\n data[\"parent\"] = page.name\n data[\"l0_parent\"] = rid\n line_type = page.get_region_type(line)\n if line_type == None:\n # -- use parent type\n print(\n \"Type missing at {} {}, searching for parent type\".format(\n page.name, line_id\n )\n )\n line_type = page.get_region_type(region)\n if line_type == None:\n print(\n \"Type search fail, using region name instead: {}\".format(\n \"TextRegion\"\n )\n )\n line_type = \"TextRegion\"\n cl[categories.index(line_type)] = 1\n line_coords = page.get_baseline(line) / img_size\n line_center = np.mean(line_coords, axis=0)\n data[\"features\"] = torch.cat(\n (\n cl, # one hot encoded line type\n torch.tensor(\n line_center, dtype=torch.float\n ), # line center\n torch.tensor(\n line_coords[0, :], dtype=torch.float\n ), # start_point coord\n torch.tensor(\n line_coords[-1, :], dtype=torch.float\n ), # end point coord\n )\n )\n # line_length = np.array([\n # 
line_coords[:,0].max()-line_coords[:,0].min(),\n # line_coords[:,1].max()-line_coords[:,1].min()\n # ])\n x.append(data)\n sorted_lines.append(line_id)\n l0_sorted_lines.append(line_id)\n l1_sorted_lines[rid+\"_\"+page.name].append(line_id)\n\n table_regions = page.get_sorted_child(\"TableRegion\")\n if table_regions != None:\n for region in table_regions:\n rid = page.get_id(region)\n l1_sorted_lines[rid+\"_\"+page.name] = []\n cells = page.get_sorted_child(\"TableCell\", region)\n for cell in cells:\n text_lines = page.get_sorted_child(\"TextLine\", cell)\n if text_lines != None:\n for line in text_lines:\n data = {}\n cl = torch.zeros(len(categories), dtype=torch.float)\n line_id = page.get_id(line)\n data[\"id\"] = line_id\n data[\"parent\"] = page.name\n data[\"l0_parent\"] = rid\n line_type = page.get_region_type(line)\n if line_type == None:\n # -- use parent type\n print(\n \"Type missing at {} {}, searching for parent type\".format(\n page.name, line_id\n )\n )\n line_type = page.get_region_type(region)\n if line_type == None:\n print(\n \"Type search fail, using region name instead: {}\".format(\n \"TableRegion\"\n )\n )\n line_type = \"TableRegion\"\n cl[categories.index(line_type)] = 1\n line_coords = page.get_baseline(line) / img_size\n line_center = np.mean(line_coords, axis=0)\n data[\"features\"] = torch.cat(\n (\n cl, # one hot encoded line type\n torch.tensor(\n line_center, dtype=torch.float\n ), # line center\n torch.tensor(\n line_coords[0, :], dtype=torch.float\n ), # start_point coord\n torch.tensor(\n line_coords[-1, :], dtype=torch.float\n ), # end point coord\n )\n )\n x.append(data)\n sorted_lines.append(line_id)\n l0_sorted_lines.append(line_id)\n l1_sorted_lines[rid+\"_\"+page.name].append(line_id)\n if len(sorted_lines) == 0:\n return None\n else:\n return (x, sorted_lines, page.name, l1_sorted_lines)\n\n\nclass PairsInMemoryDataset(Dataset):\n \"\"\"\n Basic Text-line based dataset \n \"\"\"\n\n def __init__(\n self,\n raw_data,\n set_id=\"train\",\n processed_data=\"./processed\",\n categories=[\"page_number\", \"paragraph\", \"marginalia\"],\n hierarchical=False,\n transform=None,\n force_regenerate=False,\n soft_val=False,\n level='line',\n ):\n assert isinstance(raw_data, str)\n assert set_id in [\"train\", \"val\", \"test\", \"prod\"]\n assert isinstance(processed_data, str)\n assert isinstance(categories, list)\n assert level in ['line', 'region']\n super(PairsInMemoryDataset, self).__init__()\n self._RAW_EXTENSION = \"xml\"\n self._PROCESSED_EXTENSION = \"pickle\"\n self._raw_data = raw_data\n self._set_id = set_id\n self._processed_data = processed_data\n self._categories = categories\n self._hierarchical = hierarchical\n self._transform = transform\n self._filenames = self.raw_filenames()\n self._force_regenerate = force_regenerate\n self._soft_val = soft_val\n self._level = level\n\n self._processed_file = os.path.join(\n self._processed_data, self._set_id + \"_\" + self._level + \".\" + self._PROCESSED_EXTENSION\n )\n self.get_data()\n # self._processed_files = [os.path.join(self._processed_data,\n # x + self._PROCESSED_EXTENSION) for x in self._filenames]\n\n def get_data(self):\n if files_exist([self._processed_file]) and self._force_regenerate == False:\n print(\"Loading pre-processed {} data...\".format(self._set_id))\n self.data, self.relatives, self.order, hier, set_id = torch.load(\n self._processed_file\n )\n self._num_features = self.data[0]['features'].size()[0]\n print(\"Done loading.\")\n if hier != self._hierarchical or set_id != 
self._set_id:\n                print(\"Loaded data metadata differs to current specs, regenerating data...\")\n                self._force_regenerate = True\n                self.get_data()\n            else:\n                self._build_pairs()\n        else:\n            print(\"Processing {} data...\".format(self._set_id))\n            self.pre_process()\n            self._build_pairs()\n            print(\"Done processing.\")\n        #if self._set_id in [\"val\", \"test\", \"prod\"]:\n        #    # --- for val, test, prod build all possible pairs instead of generating\n        #    # --- them randomly as in 'train'\n        #    if self._set_id == 'val' and self._soft_val == True:\n        #        pass\n        #    else:\n        #        self._build_pairs()\n\n    def raw_filenames(self):\n        return [\n            x\n            for x in glob.glob(\n                os.path.join(self._raw_data, \"*.\" + self._RAW_EXTENSION)\n            )\n        ]\n\n    def get_num_features(self):\n        # --- one-hot category + center + start_point + end_point\n        #return 2 * (len(self._categories) + 2 + 2 + 2)\n        return 2*self._num_features\n\n    def pre_process(self):\n        # --- make out dir\n        mkdir(self._processed_data)\n\n        self._processed_files = []\n        data_list = []\n        data_relatives = {}\n        data_order = {}\n        idx = 0\n        for f in self._filenames:\n            # file_path = os.path.join(self._raw_data, f + \".\" + self._RAW_EXTENSION)\n            if self._level == 'line':\n                page_data = text_line_features_from_xml(f, self._categories)\n            elif self._level == 'region':\n                page_data = region_features_from_xml(f, self._categories)\n            if page_data:\n                self._processed_files.append(f)\n                # data = {\"features\":[],\"id\":[],\"parent\":[],\"relationships\":{}}\n                #data_relatives[page_data[2]] = []\n                #if self._hierarchical:\n                #    for data in page_data[0]:\n                #        p = data[\"l0_parent\"]+\"_\"+data[\"parent\"]\n                #        data_order[p] = page_data[3][p]\n                #        if p not in data_relatives:\n                #            data_relatives[p] = []\n                #        data_relatives[p].append(idx)\n                #        data_list.append(data)\n                if self._hierarchical == False:\n                    data_order[page_data[2]] = page_data[1]\n                for data in page_data[0]:\n                    if self._hierarchical:\n                        p = data[\"l0_parent\"]+\"_\"+data[\"parent\"]\n                    else:\n                        p = data[\"parent\"]\n                    if p not in data_relatives:\n                        data_relatives[p] = []\n                    if self._hierarchical:\n                        data_order[p] = page_data[3][p]\n                    data_relatives[p].append(idx)\n                    #data_relatives[page_data[2]].append(idx)\n                    data_list.append(data)\n                    idx += 1\n            else:\n                print(\n                    \"File {} contains no data. Dropped from {} set.\".format(\n                        f, self._set_id\n                    )\n                )\n\n        torch.save(\n            (data_list, data_relatives, data_order, self._hierarchical, self._set_id), self._processed_file\n        )\n        self.data = data_list\n        self.relatives = data_relatives\n        self.order = data_order\n        self._num_features = data_list[0]['features'].size()[0]\n\n    def __getitem__(self, idx):\n        if self._set_id == \"train\" or (\n            self._set_id == \"val\" and self._soft_val == True\n        ):\n            # --- each time a sample is selected, generate a random pair from its\n            # --- relatives\n            if self._hierarchical:\n                p = self.data[idx][\"l0_parent\"] + \"_\" + self.data[idx][\"parent\"]\n            else:\n                p = self.data[idx][\"parent\"]\n            relatives = self.relatives[p]\n            #--- if len(relatives) == 1 the element is alone, so the decoder will take care of it. 
But to keep the dataloader returning samples in the same order, a dummy pair is generated\n            if len(relatives) == 1:\n                ridx = idx\n            else:\n                ridx = torch.randint(0, len(relatives), (1,)).item()\n                while relatives[ridx] == idx:\n                    ridx = torch.randint(0, len(relatives), (1,)).item()\n                ridx = relatives[ridx]\n\n            x = torch.cat(\n                (self.data[idx][\"features\"], self.data[ridx][\"features\"])\n            )\n            z = (self.data[idx][\"id\"], self.data[ridx][\"id\"])\n            # print(\n            #    self.data[idx]['parent'], self.data[idx]['id'],\n            #    \" vs \",\n            #    self.data[ridx]['parent'], self.data[ridx]['id']\n            # )\n            y = 0 if idx >= ridx else 1\n            y = torch.tensor(y, dtype=torch.float)\n\n            sample = {\"x\": x, \"t\": y, \"z\": z, 'parent': p}\n            if self._transform:\n                sample = self._transform(sample)\n\n        elif self._set_id == \"val\" and self._soft_val == False:\n            sample = self.pairs[idx]\n        elif self._set_id == \"test\":\n            sample = self.pairs[idx]\n        elif self._set_id == \"prod\":\n            sample = self.pairs[idx]\n\n        return sample\n\n    def _build_pairs(self):\n        if self._set_id == 'train' or (self._set_id == 'val' and self._soft_val == True):\n            return None\n        print(\"Build pairs for {} set\".format(self._set_id))\n        pairs = []\n        for parent, childs in self.relatives.items():\n            for i in childs:\n                for j in childs:\n                    if i == j and len(childs) != 1:\n                        # --- ignore self-to-self comparison since the result is known\n                        continue\n                    x = torch.cat(\n                        (self.data[i][\"features\"], self.data[j][\"features\"])\n                    )\n                    y = 0 if i >= j else 1\n                    y = torch.tensor(y, dtype=torch.float)\n                    z = (self.data[i][\"id\"], self.data[j][\"id\"])\n                    pairs.append({\"x\": x, \"t\": y, \"z\": z, \"parent\": parent})\n        self.pairs = pairs\n\n    def __len__(self):\n        if self._set_id in [\"test\", \"prod\"]:\n            return len(self.pairs)\n        elif self._set_id == \"val\" and self._soft_val == False:\n            return len(self.pairs)\n        elif self._set_id == \"val\" and self._soft_val == True:\n            return len(self.data)\n        return len(self.data)\n\n\n"
]
| [
[
"torch.cat",
"torch.save",
"numpy.mean",
"torch.tensor",
"torch.load"
]
]
|
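The `PairsInMemoryDataset` above reduces reading-order prediction to binary classification over ordered pairs of page elements that share a parent (`t = 1` when the first element precedes the second). A minimal standalone sketch of that reduction, with a hypothetical `build_order_pairs` helper and toy 2-d features (not part of the repository):

```python
import torch

def build_order_pairs(features, parents):
    # pair every two distinct elements sharing a parent; target is 1.0
    # when the first element precedes the second in reading order
    pairs = []
    for i in range(len(features)):
        for j in range(len(features)):
            if i != j and parents[i] == parents[j]:
                x = torch.cat((features[i], features[j]))
                t = torch.tensor(1.0 if i < j else 0.0)
                pairs.append((x, t))
    return pairs

# toy input: three elements on one page, 2-d features each
feats = [torch.tensor([0.1, 0.2]), torch.tensor([0.3, 0.4]), torch.tensor([0.5, 0.6])]
pairs = build_order_pairs(feats, ["page0"] * 3)
print(len(pairs), pairs[0][0].shape)  # 6 ordered pairs, each with 4 features
```

A decoder can then sort a page's elements from the pairwise precedence scores.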
oviazlo/SparseDistance | [
"077385b26cfb160d756817d9b2e5959270b3f745"
]
| [
"sparsedistance/models.py"
]
| [
"# BSD 3-Clause License\n\n# Copyright (c) 2020, Joosep Pata\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport tensorflow as tf\nfrom .utils import split_indices_to_bins, pairwise_dist, sparse_dense_matmult_batch\n\n#Based on the Reformer and GravNet papers\nclass SparseHashedNNDistance(tf.keras.layers.Layer):\n def __init__(self, max_num_bins=200, bin_size=500, num_neighbors=5, dist_mult=0.1, cosine_dist=False, **kwargs):\n super(SparseHashedNNDistance, self).__init__(**kwargs)\n self.num_neighbors = num_neighbors\n self.dist_mult = dist_mult\n\n self.cosine_dist = cosine_dist\n\n #generate the codebook for LSH hashing at model instantiation for up to this many bins\n #set this to a high-enough value at model generation to take into account the largest possible input \n self.max_num_bins = max_num_bins\n\n #each bin will receive this many input elements, in total we can accept max_num_bins*bin_size input elements\n #in each bin, we will do a dense top_k evaluation\n self.bin_size = bin_size\n\n def build(self, input_shape):\n #(n_batch, n_points, n_features)\n\n #generate the LSH codebook for random rotations\n self.codebook_random_rotations = self.add_weight(\n shape=(input_shape[-1], self.max_num_bins//2), initializer=\"random_normal\", trainable=False, name=\"lsh_projections\"\n )\n\n @tf.function\n def call(self, inputs, training=True):\n\n #(n_batch, n_points, n_features)\n point_embedding = inputs\n\n n_batches = tf.shape(point_embedding)[0]\n n_points = tf.shape(point_embedding)[1]\n\n #cannot concat sparse tensors directly as that incorrectly destroys the gradient, see\n #https://github.com/tensorflow/tensorflow/blob/df3a3375941b9e920667acfe72fb4c33a8f45503/tensorflow/python/ops/sparse_grad.py#L33\n #therefore, for training, we implement sparse concatenation by hand \n indices_all = []\n values_all = []\n\n def func(args):\n ibatch, points_batch = args[0], args[1]\n dm = self.construct_sparse_dm_batch(points_batch)\n inds = tf.concat([tf.expand_dims(tf.cast(ibatch, tf.int64)*tf.ones(tf.shape(dm.indices)[0], dtype=tf.int64), -1), 
dm.indices], axis=-1)\n            vals = dm.values\n            return inds, vals\n\n        elems = (tf.range(0, n_batches, delta=1, dtype=tf.int64), point_embedding)\n        ret = tf.map_fn(func, elems, fn_output_signature=(tf.int64, tf.float32), parallel_iterations=1)\n\n        shp = tf.shape(ret[0])\n        # #now create a new SparseTensor that is a concatenation of the previous ones\n        dms = tf.SparseTensor(\n            tf.reshape(ret[0], (shp[0]*shp[1], shp[2])),\n            tf.reshape(ret[1], (shp[0]*shp[1],)),\n            (n_batches, n_points, n_points)\n        )\n\n        return tf.sparse.reorder(dms)\n\n    def subpoints_to_sparse_matrix(self, n_points, subindices, subpoints):\n\n        #find the distance matrix between the given points using dense matrix multiplication\n        if self.cosine_dist:\n            normed = tf.nn.l2_normalize(subpoints, axis=-1)\n            dm = tf.linalg.matmul(normed, normed, transpose_b=True)\n        else:\n            dm = pairwise_dist(subpoints, subpoints)\n            dm = tf.exp(-self.dist_mult*dm)\n\n        dmshape = tf.shape(dm)\n        nbins = dmshape[0]\n        nelems = dmshape[1]\n\n        #run KNN in the dense distance matrix, accumulate each index pair into a sparse distance matrix\n        top_k = tf.nn.top_k(dm, k=self.num_neighbors)\n        top_k_vals = tf.reshape(top_k.values, (nbins*nelems, self.num_neighbors))\n\n        indices_gathered = tf.vectorized_map(\n            lambda i: tf.gather_nd(subindices, top_k.indices[:, :, i:i+1], batch_dims=1),\n            tf.range(self.num_neighbors, dtype=tf.int64))\n\n        indices_gathered = tf.transpose(indices_gathered, [1,2,0])\n\n        #add the neighbors up to a big matrix using dense matrices, then convert to sparse (mainly for testing)\n        # sp_sum = tf.zeros((n_points, n_points))\n        # for i in range(self.num_neighbors):\n        #     dst_ind = indices_gathered[:, :, i] #(nbins, nelems)\n        #     dst_ind = tf.reshape(dst_ind, (nbins*nelems, ))\n        #     src_ind = tf.reshape(tf.stack(subindices), (nbins*nelems, ))\n        #     src_dst_inds = tf.transpose(tf.stack([src_ind, dst_ind]))\n        #     sp_sum += tf.scatter_nd(src_dst_inds, top_k_vals[:, i], (n_points, n_points))\n        # spt_this = tf.sparse.from_dense(sp_sum)\n        # validate that the vectorized ops are doing what we want by hand while debugging\n        # dm = np.eye(n_points)\n        # for ibin in range(nbins):\n        #     for ielem in range(nelems):\n        #         idx0 = subindices[ibin][ielem]\n        #         for ineigh in range(self.num_neighbors):\n        #             idx1 = subindices[ibin][top_k.indices[ibin, ielem, ineigh]]\n        #             val = top_k.values[ibin, ielem, ineigh]\n        #             dm[idx0, idx1] += val\n        # assert(np.all(sp_sum.numpy() == dm))\n\n        #update the output using intermediate sparse matrices, which may result in some inconsistencies from duplicated indices\n        sp_sum = tf.sparse.SparseTensor(indices=tf.zeros((0,2), dtype=tf.int64), values=tf.zeros(0, tf.float32), dense_shape=(n_points, n_points))\n        for i in range(self.num_neighbors):\n            dst_ind = indices_gathered[:, :, i] #(nbins, nelems)\n            dst_ind = tf.reshape(dst_ind, (nbins*nelems, ))\n            src_ind = tf.reshape(tf.stack(subindices), (nbins*nelems, ))\n            src_dst_inds = tf.cast(tf.transpose(tf.stack([src_ind, dst_ind])), dtype=tf.int64)\n            sp_sum = tf.sparse.add(\n                sp_sum,\n                tf.sparse.reorder(tf.sparse.SparseTensor(src_dst_inds, top_k_vals[:, i], (n_points, n_points)))\n            )\n        spt_this = tf.sparse.reorder(sp_sum)\n\n        return spt_this\n\n    def construct_sparse_dm_batch(self, points):\n\n        #points: (n_points, n_features) input elements for graph construction\n        n_points = tf.shape(points)[0]\n        n_features = tf.shape(points)[1]\n\n        #compute the number of LSH bins to divide the input points into on the fly\n        #n_points must be divisible by bin_size exactly due to the use of reshape\n        n_bins = 
tf.math.floordiv(n_points, self.bin_size)\n #tf.debugging.assert_greater(n_bins, 0)\n\n #put each input item into a bin defined by the softmax output across the LSH embedding\n mul = tf.linalg.matmul(points, self.codebook_random_rotations[:, :n_bins//2])\n #tf.debugging.assert_greater(tf.shape(mul)[2], 0)\n\n cmul = tf.concat([mul, -mul], axis=-1)\n\n #cmul is now an integer in [0..nbins) for each input point\n #bins_split: (n_bins, bin_size) of integer bin indices, which put each input point into a bin of size (n_points/n_bins)\n bins_split = split_indices_to_bins(cmul, n_bins, self.bin_size)\n\n #parts: (n_bins, bin_size, n_features), the input points divided up into bins\n parts = tf.gather(points, bins_split)\n\n #sparse_distance_matrix: (n_points, n_points) sparse distance matrix\n #where higher values (closer to 1) are associated with points that are closely related\n sparse_distance_matrix = self.subpoints_to_sparse_matrix(n_points, bins_split, parts)\n\n return sparse_distance_matrix"
]
| [
[
"tensorflow.sparse.reorder",
"tensorflow.exp",
"tensorflow.shape",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.gather_nd",
"tensorflow.sparse.SparseTensor",
"tensorflow.reshape",
"tensorflow.map_fn",
"tensorflow.transpose",
"tensorflow.math.floordiv",
"tensorflow.linalg.matmul",
"tensorflow.stack",
"tensorflow.nn.top_k",
"tensorflow.gather",
"tensorflow.nn.l2_normalize",
"tensorflow.cast"
]
]
|
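For reference, the dense computation that `SparseHashedNNDistance` approximates inside each LSH bin can be written out directly. A toy sketch (random points, made-up `dist_mult=0.1` and `k=3`; an illustration of the idea, not the package's API):

```python
import tensorflow as tf

pts = tf.random.normal((8, 4))  # toy cloud: 8 points, 4 features

# squared Euclidean distances: |a|^2 + |b|^2 - 2 a.b
sq = tf.reduce_sum(pts ** 2, axis=-1)
dm = sq[:, None] + sq[None, :] - 2.0 * tf.linalg.matmul(pts, pts, transpose_b=True)

# exponential affinity as in the layer; dist_mult acts as a bandwidth
aff = tf.exp(-0.1 * dm)

# keep only the k strongest neighbours per point
top_k = tf.nn.top_k(aff, k=3)
print(top_k.indices.numpy())  # (8, 3) neighbour indices per point
```

The layer avoids materializing the full `n_points x n_points` matrix by running this only within LSH bins and scattering the per-bin top-k entries into a `tf.sparse.SparseTensor`.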
OddExtension5/repo4U | [
"bfd82ed1ec7489745c198b90d5763119b6f82db6"
]
| [
"model/model.py"
]
| [
"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.neighbors import NearestNeighbors\nimport pandas as pd\nimport dill\n\n# Data Pre-Processing\n\n# load the dataset\ndf = pd.read_csv('../data/stars.csv')\n\n# select top languages from dataset\ndf = df[df.language.isin(['Python', 'Jupyter Notebook', 'C++', 'Java', 'Go', 'JavaScript', 'C', 'HTML', 'CSS', 'TypeScript', 'C#', 'Kotlin', 'R', 'Ruby', 'Scala'])]\npopular = pd.DataFrame(df['repo'].value_counts())\n\n# repo having atleast 5 stars\nselect_repos = popular[popular['repo'] >= 5].index.tolist()\nselect_repos = select_repos[1:]\ndf = df[df['repo'].isin(select_repos)]\ndf = df.groupby([\"user\"])[\"repo\"].apply(lambda x: \",\".join(x))\n\n# final data\ndf = pd.DataFrame(df)\n\n\n# NN-Recommender model\nclass NNRecommender:\n def __init__(self, n_neighbors=10, max_features=1000, tokenizer=lambda x: x.split(\",\")):\n self.cv = CountVectorizer(tokenizer=tokenizer, max_features=max_features)\n self.nn = NearestNeighbors(n_neighbors=n_neighbors)\n\n def fit(self, X):\n self.X = X\n X = self.cv.fit_transform(X)\n self.nn.fit(X)\n return self\n\n def predict(self, X):\n Xp = []\n for Xi in X:\n Xt = self.cv.transform([Xi])\n _, neighbors = self.nn.kneighbors(Xt)\n repos = []\n for n in neighbors[0]:\n r = self.X.iloc[int(n)].split(\",\")\n repos.extend(r)\n repos = list(set(repos))\n repos = [r for r in repos if r not in Xi.split(\",\")]\n Xp.append(repos)\n return Xp\n\n# Hyperparameters\nn_neighbors = 10\nmax_features = 1000\n\nmodel = NNRecommender(n_neighbors, max_features)\nmodel.fit(df[\"repo\"])\n\n# serialise the model\nwith open(\"model.pkl\", \"wb\") as f:\n dill.dump(model, f)\n"
]
| [
[
"sklearn.neighbors.NearestNeighbors",
"pandas.DataFrame",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer"
]
]
|
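The script above is a bag-of-repos vectorizer plus user-level k-NN. A self-contained toy run of the same pattern (made-up repo names, not the project's data):

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import NearestNeighbors

# toy "user -> starred repos" strings, same comma-separated encoding as above
stars = [
    "numpy/numpy,scipy/scipy,pandas/pandas",
    "numpy/numpy,scikit-learn/scikit-learn",
    "torvalds/linux,git/git",
]

cv = CountVectorizer(tokenizer=lambda x: x.split(","), max_features=100)
X = cv.fit_transform(stars)

nn = NearestNeighbors(n_neighbors=2).fit(X)
_, idx = nn.kneighbors(cv.transform(["numpy/numpy,pandas/pandas"]))
print(idx[0])  # indices of the two most similar users
```

`NNRecommender.predict` then unions the neighbours' repo lists and drops the repos the query user already starred.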
osigaud/stable-baselines3 | [
"314307c78a173c92b30b56776f96cbe9eb24b27d"
]
| [
"stable_baselines3/cem/policies.py"
]
| [
"from typing import Any, Dict, List, Optional, Type\n\nimport gym\nimport torch as th\nfrom gym import spaces\nfrom torch import nn\n\nfrom stable_baselines3.common.policies import BasePolicy, register_policy\nfrom stable_baselines3.common.preprocessing import get_action_dim\nfrom stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp\n\n\nclass CEMPolicy(BasePolicy):\n \"\"\"\n Policy network for CEM.\n\n :param observation_space: Obervation space\n :param action_space: Action space\n :param net_arch: Network architecture\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param features_dim: Number of features\n :param activation_fn: Activation function\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n net_arch: Optional[List[int]] = None,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n ):\n\n super().__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images=normalize_images,\n squash_output=isinstance(action_space, spaces.Box),\n )\n\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n # Small network otherwise sampling is slow\n net_arch = [64]\n\n self.net_arch = net_arch\n self.features_extractor = self.make_features_extractor()\n self.features_dim = self.features_extractor.features_dim\n self.activation_fn = activation_fn\n\n if isinstance(action_space, spaces.Box):\n action_dim = get_action_dim(action_space)\n actor_net = create_mlp(self.features_dim, action_dim, net_arch, activation_fn, squash_output=True)\n elif isinstance(action_space, spaces.Discrete):\n actor_net = create_mlp(self.features_dim, action_space.n, net_arch, activation_fn)\n else:\n raise NotImplementedError(\"Error: CEM policy not implemented for action space\" f\"of type {type(action_space)}.\")\n\n # Deterministic action\n self.action_net = nn.Sequential(*actor_net)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n data.update(\n dict(\n net_arch=self.net_arch,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n activation_fn=self.activation_fn,\n )\n )\n return data\n\n def forward(self, obs: th.Tensor) -> th.Tensor:\n # only outputs deterministic actions for now\n features = self.extract_features(obs)\n if isinstance(self.action_space, spaces.Box):\n return self.action_net(features)\n elif isinstance(self.action_space, spaces.Discrete):\n logits = self.action_net(features)\n return th.argmax(logits, dim=1)\n else:\n raise NotImplementedError()\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Note: the deterministic deterministic parameter is ignored in the case of CEM.\n # Predictions are always deterministic for now.\n return self.forward(observation)\n\n\nMlpPolicy = CEMPolicy\n\nregister_policy(\"MlpPolicy\", CEMPolicy)\n"
]
| [
[
"torch.nn.Sequential",
"torch.argmax"
]
]
|
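At its core, `CEMPolicy` is a deterministic actor: an MLP that maps extracted features to a tanh-squashed action for `Box` spaces, or to logits followed by argmax for `Discrete` ones. A minimal torch sketch of that forward pass, with toy dimensions and no feature extractor (an illustration, not the stable-baselines3 API):

```python
import torch as th
from torch import nn

obs_dim, act_dim = 4, 2

# Box action space: one hidden layer of 64 units, tanh squashes to [-1, 1]
actor = nn.Sequential(
    nn.Linear(obs_dim, 64), nn.ReLU(),
    nn.Linear(64, act_dim), nn.Tanh(),
)
obs = th.randn(1, obs_dim)
print(actor(obs))  # deterministic action in [-1, 1]^2

# Discrete action space: the last layer outputs logits, the action is argmax
logits_head = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, 3))
print(th.argmax(logits_head(obs), dim=1))  # integer action
```

No action distribution is needed here, since CEM searches over policy parameters rather than sampling actions.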
greenwoodms06/raven | [
"ef1372364a2776385931763f2b28fdf2930c77b9"
]
| [
"tests/framework/unit_tests/Optimizers/test2pointsCrossover.py"
]
| [
"# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n Testing for the twoPointsCrossover method\n @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi\n\"\"\"\nimport os\nimport sys\nimport xarray as xr\nimport numpy as np\n\nravenPath = os.path.abspath(os.path.join(__file__, *['..'] * 5, 'framework'))\nprint('... located RAVEN at:', ravenPath)\nsys.path.append(ravenPath)\nimport Driver\nfrom Optimizers.crossOverOperators.crossovers import returnInstance\n\ntwoPointsCrossover = returnInstance('tester', 'twoPointsCrossover')\n\n#\n#\n# checkers\n#\ndef checkSameDataArrays(comment, resultedDA, expectedDA, update=True):\n \"\"\"\n This method compares two identical things\n @ In, comment, string, a comment printed out if it fails\n @ In, resultedDA, xr.DataArray, the resulted DataArray to be tested\n @ In, expectedDA, xr.DataArray, the expected DataArray\n @ In, update, bool, optional, if False then don't update results counter\n @ Out, res, bool, True if same\n \"\"\"\n res = resultedDA.identical(expectedDA)\n if update:\n if res:\n results[\"pass\"] += 1\n else:\n print(\"checking string\", comment, '|', resultedDA, \"!=\", expectedDA)\n results[\"fail\"] += 1\n return res\n\nresults = {'pass': 0, 'fail': 0}\n#\n#\n# initialization\n#\noptVars = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8']\npopulation = [[11,12,13,14,15,16,17,18],\n [21,22,23,24,25,26,27,28],\n [31,32,33,34,35,36,37,38],\n [41,42,43,44,45,46,47,48]]\npopulation = xr.DataArray(population,\n dims = ['chromosome','Gene'],\n coords = {'chromosome': np.arange(np.shape(population)[0]),\n 'Gene':optVars})\nnParents = 2\nchildren = twoPointsCrossover(population)\n\nprint('twoPointsCrossover')\nprint('*'*19)\nprint('generated children are: {}'.format(children))\nexpectedChildren = xr.DataArray([[ 11., 22., 23., 24., 25., 26., 17., 18.],\n [ 21., 12., 13., 14., 15., 16., 27., 28.],\n [ 11., 12., 13., 14., 15., 16., 37., 18.],\n [ 31., 32., 33., 34., 35., 36., 17., 38.],\n [ 11., 42., 43., 44., 45., 46., 47., 18.],\n [ 41., 12., 13., 14., 15., 16., 17., 48.],\n [ 21., 22., 33., 34., 35., 36., 37., 28.],\n [ 31., 32., 23., 24., 25., 26., 27., 38.],\n [ 21., 22., 43., 44., 45., 26., 27., 28.],\n [ 41., 42., 23., 24., 25., 46., 47., 48.],\n [ 31., 42., 43., 44., 45., 36., 37., 38.],\n [ 41., 32., 33., 34., 35., 46., 47., 48.]],\n dims = ['chromosome','Gene'],\n coords = {'chromosome': np.arange(12),\n 'Gene' : optVars})\n\n## TESTING\n# Test survivor population\ncheckSameDataArrays('Check survived population data array',children,expectedChildren)\n#\n# end\n#\nprint('Results:', results)\nsys.exit(results['fail'])\n"
]
| [
[
"numpy.arange",
"numpy.shape"
]
]
|
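The expected children in this test come from swapping the gene segment between two cut points for every pair of parents. A standalone numpy sketch of the core operator (fixed seed, single parent pair; RAVEN's implementation additionally enumerates all parent pairs, which is how 4 parents yield the 12 children checked above):

```python
import numpy as np

def two_points_crossover(parent1, parent2, rng=None):
    # swap the gene segment between two random cut points of two parents
    if rng is None:
        rng = np.random.default_rng(0)
    n = len(parent1)
    lo, hi = sorted(rng.choice(np.arange(1, n), size=2, replace=False))
    child1, child2 = parent1.copy(), parent2.copy()
    child1[lo:hi], child2[lo:hi] = parent2[lo:hi], parent1[lo:hi]
    return child1, child2

p1 = np.array([11, 12, 13, 14, 15, 16, 17, 18])
p2 = np.array([21, 22, 23, 24, 25, 26, 27, 28])
print(two_points_crossover(p1, p2))
```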
LittleWat/MCD_DA | [
"af10217c5c5451dcd8bc3e975a7d067c285cc029"
]
| [
"segmentation/tools/concat_rgb_gt_pred_img.py"
]
| [
"\"\"\"\nCompare predicted visualized png.\n\nCreate merged png image that is randomly selected with original RGB image and GT.\n\"\"\"\n\nimport argparse\nimport os\nimport random\n\nimport numpy as np\nfrom PIL import Image\n\nfrom util import mkdir_if_not_exist\n\nVIS_GT_DIR_DIC = {\n \"city\": \"/data/unagi0/watanabe/DomainAdaptation/Segmentation/VisDA2017/cityscapes_vis_gt/val\",\n \"city16\": \"/data/unagi0/watanabe/DomainAdaptation/Segmentation/VisDA2017/cityscapes16_vis_gt/val\"\n}\nRGB_IMG_DIR_DIC = {\n \"city\": \"/data/unagi0/watanabe/DomainAdaptation/Segmentation/VisDA2017/cityscapes_val_imgs\",\n \"city16\": \"/data/unagi0/watanabe/DomainAdaptation/Segmentation/VisDA2017/cityscapes_val_imgs\"\n}\n\nparser = argparse.ArgumentParser(description='Visualize Some Results')\nparser.add_argument('dataset', choices=[\"gta\", \"city\", \"test\", \"ir\", \"city16\"])\nparser.add_argument('--n_img', type=int, default=5)\nparser.add_argument('--pred_vis_dirs', type=str, nargs='+',\n help='result directory that visualized pngs')\nparser.add_argument('--outdir', type=str, default=\"vis_comparison\")\nparser.add_argument(\"--rand_sample\", action=\"store_true\",\n help='whether you sample results randomly')\n\nargs = parser.parse_args()\n\nrgb_dir = RGB_IMG_DIR_DIC[args.dataset]\nvis_gt_dir = VIS_GT_DIR_DIC[args.dataset]\n\nif args.rand_sample:\n rgbfn_list = os.listdir(rgb_dir)\nelse:\n pickup_id_list = [\n \"lindau_000006_000019\",\n \"frankfurt_000001_021406\",\n \"frankfurt_000001_041074\",\n \"frankfurt_000001_002512\",\n \"frankfurt_000000_009688\",\n \"frankfurt_000001_040575\",\n \"munster_000050_000019\"\n ]\n rgbfn_list = [x + \"_leftImg8bit.png\" for x in pickup_id_list]\n\npickup_rgbfn_list = random.sample(rgbfn_list, args.n_img)\nprint (\"pickup filename list\")\nprint (pickup_rgbfn_list)\n\nall_img_list = []\nfor rgbfn in pickup_rgbfn_list:\n full_rgbfn = os.path.join(rgb_dir, rgbfn)\n\n gtfn = rgbfn.replace(\"leftImg8bit\", \"gtFine_gtlabels\")\n full_gtfn = os.path.join(vis_gt_dir, gtfn)\n\n one_column_img_list = []\n one_column_img_list.append(Image.open(full_rgbfn))\n\n one_column_img_list.append(Image.open(full_gtfn))\n\n for pred_vis_dir in args.pred_vis_dirs:\n full_predfn = os.path.join(pred_vis_dir, rgbfn)\n one_column_img_list.append(Image.open(full_predfn))\n\n all_img_list.append(one_column_img_list)\n\n\ndef concat_imgs(imgs):\n n_row = len(imgs[0])\n n_col = len(imgs)\n w, h = imgs[0][0].size\n\n merged_img = Image.new('RGB', (w * n_col, h * n_row))\n for col in range(n_col):\n for row in range(n_row):\n merged_img.paste(imgs[col][row], (w * col, h * row))\n\n return merged_img\n\n\nres = concat_imgs(all_img_list)\nsize = np.array(res.size)\nres = res.resize(size / 8)\n\nmkdir_if_not_exist(args.outdir)\nshortened_pickup_rgbfn_list = [x.replace(\"_leftImg8bit.png\", \"\") for x in pickup_rgbfn_list]\npickup_str = \"-\".join(shortened_pickup_rgbfn_list) + \".pdf\"\noutfn = os.path.join(args.outdir, pickup_str)\nres.save(outfn)\nprint (\"Successfully saved result to %s\" % outfn)\n"
]
| [
[
"numpy.array"
]
]
|
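`concat_imgs` in this script lays each selected sample out as one column of [RGB, GT, prediction...] tiles. The pasting logic in isolation, on synthetic solid-color tiles (toy sizes and colors):

```python
from PIL import Image

# toy 2x3 grid: two columns, each with [RGB, GT, prediction] stand-ins
colors = [[(255, 0, 0), (0, 255, 0), (0, 0, 255)],
          [(255, 255, 0), (0, 255, 255), (255, 0, 255)]]
tiles = [[Image.new("RGB", (64, 32), c) for c in col] for col in colors]

w, h = tiles[0][0].size
grid = Image.new("RGB", (w * len(tiles), h * len(tiles[0])))
for col, images in enumerate(tiles):
    for row, im in enumerate(images):
        grid.paste(im, (w * col, h * row))
grid.save("grid.png")
```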
ShireFolk/tensorflow | [
"ca6ce24f81887c299ab5e9883083380c8e95701e"
]
| [
"tensorflow/python/distribute/values_test.py"
]
| [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the distributed values library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport itertools\nimport os\nfrom absl.testing import parameterized\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.saved_model.model_utils import mode_keys\nfrom tensorflow.python.tpu import tpu_strategy_util\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training.tracking import util as trackable_utils\nfrom tensorflow.python.util import nest\n\n\nclass DistributedValuesTest(test.TestCase):\n\n def testGetEager(self):\n one = constant_op.constant(1)\n two = constant_op.constant(2)\n v = values.DistributedValues((one, two))\n self.assertEqual(one, v.get())\n with distribute_lib.ReplicaContext(None, 1):\n self.assertEqual(two, v.get())\n\n def testGetGraph(self):\n with context.graph_mode(), ops.Graph().as_default():\n one = constant_op.constant(1)\n two = constant_op.constant(2)\n v = values.DistributedValues((one, two))\n self.assertEqual(one, v.get())\n with distribute_lib.ReplicaContext(None, 1):\n self.assertEqual(two, v.get())\n\n def testIsTensorLike(self):\n with context.graph_mode(), ops.Graph().as_default():\n one = constant_op.constant(1)\n two = constant_op.constant(2)\n v = values.DistributedValues((one, two))\n self.assertTrue(v.is_tensor_like)\n self.assertTrue(tensor_util.is_tensor(v))\n\n def testIsTensorLikeWithAConstant(self):\n with context.graph_mode(), ops.Graph().as_default():\n one = constant_op.constant(1)\n two = 2.0\n v = values.DistributedValues((one, two))\n 
self.assertFalse(v.is_tensor_like)\n self.assertFalse(tensor_util.is_tensor(v))\n\n\nclass DistributedDelegateTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def testGetAttr(self):\n class Foo(object):\n\n def __init__(self, x):\n self.x = x\n\n v = values.DistributedDelegate((Foo(7), Foo(8)))\n self.assertEqual(7, v.x)\n with self.assertRaises(AttributeError):\n _ = v.y\n\n @test_util.run_in_graph_and_eager_modes\n def testOperatorOverride(self):\n v = values.DistributedDelegate((7, 8))\n # v should act like int(7).\n self.assertEqual(8, v + 1)\n self.assertEqual(10, 3 + v)\n self.assertEqual(14, v + v)\n self.assertEqual(5, v - 2)\n self.assertEqual(6, 13 - v)\n self.assertEqual(0, v - v)\n self.assertEqual(14, v * 2)\n self.assertEqual(21, 3 * v)\n self.assertEqual(49, v * v)\n self.assertEqual(3.5, v / 2)\n self.assertEqual(1.5, 10.5 / v)\n self.assertEqual(3, v // 2)\n self.assertEqual(2, 15 // v)\n self.assertEqual(1, v % 2)\n self.assertEqual(2, 16 % v)\n # pylint: disable=g-generic-assert\n self.assertTrue(v < 12)\n self.assertTrue(v <= 12)\n self.assertFalse(v > 12)\n self.assertFalse(v >= 12)\n self.assertFalse(12 < v)\n self.assertFalse(12 <= v)\n self.assertTrue(12 > v)\n self.assertTrue(12 >= v)\n # pylint: enable=g-generic-assert\n self.assertEqual(3, v & 3)\n self.assertEqual(3, 11 & v)\n self.assertEqual(15, v | 8)\n self.assertEqual(23, 16 | v)\n self.assertEqual(4, v ^ 3)\n self.assertEqual(12, 11 ^ v)\n self.assertEqual(343, pow(v, 3))\n self.assertEqual(3, pow(v, 3, 10))\n self.assertEqual(128, pow(2, v))\n self.assertEqual(-7, -v)\n self.assertEqual(~7, ~v)\n self.assertEqual(7, abs(v))\n with self.assertRaises(TypeError):\n _ = v[2]\n\n\ndef _device_str(d):\n return \"/device:GPU:\" + str(d)\n\n\ndef _nested_value(d):\n return (\"a\" + d, [\"b\" + d, {\"c\": \"d\" + d, \"e\": \"f\" + d}, \"g\" + d], \"h\" + d)\n\n\ndef _make_mirrored_val(init_val=5.0):\n v = []\n devices = [\"/device:GPU:0\", \"/device:CPU:0\"]\n for d, _ in zip(devices, [\"v\", \"v/replica\"]):\n with ops.device(d):\n v.append(constant_op.constant(init_val))\n return values.Mirrored(v)\n\n\ndef _make_mirrored():\n v = []\n devices = [\"/device:GPU:0\", \"/device:CPU:0\"]\n for d, n, init in zip(devices, [\"v\", \"v/replica\"], [1., 2.]):\n with ops.device(d):\n v.append(variable_scope.get_variable(\n name=n, initializer=init, use_resource=True))\n mirrored = values.MirroredVariable(\n None, v, variable_scope.VariableAggregation.SUM)\n return mirrored\n\n\nclass RegroupAndSelectDeviceTest(test.TestCase):\n\n def _is_per_replica(self, result, expected, klass=values.PerReplica):\n self.assertIsInstance(result, klass)\n for i, exp in enumerate(expected):\n self.assertEqual(exp, result.values[i])\n\n def testNested(self):\n result = values.regroup((_nested_value(\"1\"), _nested_value(\"2\")))\n self.assertIsInstance(result, tuple)\n self.assertEqual(3, len(result))\n self._is_per_replica(result[0], [\"a1\", \"a2\"])\n self._is_per_replica(result[2], [\"h1\", \"h2\"])\n\n self.assertIsInstance(result[1], list)\n self.assertEqual(3, len(result[1]))\n self._is_per_replica(result[1][0], [\"b1\", \"b2\"])\n self._is_per_replica(result[1][2], [\"g1\", \"g2\"])\n\n self.assertIsInstance(result[1][1], dict)\n self.assertEqual(set([\"c\", \"e\"]), set(result[1][1].keys()))\n self._is_per_replica(result[1][1][\"c\"], [\"d1\", \"d2\"])\n self._is_per_replica(result[1][1][\"e\"], [\"f1\", \"f2\"])\n\n # Also test that we can undo the merge using select_replica()\n 
self.assertEqual(_nested_value(\"1\"),\n values.select_replica(0, result))\n self.assertEqual(_nested_value(\"2\"),\n values.select_replica(1, result))\n # select_device_mirrored() should fail due to non-mirrored values\n with self.assertRaises(TypeError):\n values.select_replica_mirrored(0, result)\n with self.assertRaises(TypeError):\n values.select_replica_mirrored(1, result)\n\n def testWrapClass(self):\n # Normally a mirrored value would be the same across devices, but\n # for a test it is convenient to be able to tell the values apart.\n result = values.regroup((_nested_value(\"1\"), _nested_value(\"2\")),\n values.Mirrored)\n self.assertIsInstance(result, tuple)\n self.assertEqual(3, len(result))\n self._is_per_replica(result[0], [\"a1\", \"a2\"], values.Mirrored)\n self._is_per_replica(result[2], [\"h1\", \"h2\"], values.Mirrored)\n\n self.assertIsInstance(result[1], list)\n self.assertEqual(3, len(result[1]))\n self._is_per_replica(result[1][0], [\"b1\", \"b2\"], values.Mirrored)\n self._is_per_replica(result[1][2], [\"g1\", \"g2\"], values.Mirrored)\n\n self.assertIsInstance(result[1][1], dict)\n self.assertEqual(set([\"c\", \"e\"]), set(result[1][1].keys()))\n self._is_per_replica(result[1][1][\"c\"], [\"d1\", \"d2\"], values.Mirrored)\n self._is_per_replica(result[1][1][\"e\"], [\"f1\", \"f2\"], values.Mirrored)\n\n # Also test that we can undo the merge using select_replica()\n self.assertEqual(_nested_value(\"1\"),\n values.select_replica(0, result))\n self.assertEqual(_nested_value(\"2\"),\n values.select_replica(1, result))\n # Values are marked as mirrored, so select_device_mirrored() is allowed.\n self.assertEqual(_nested_value(\"1\"),\n values.select_replica_mirrored(0, result))\n self.assertEqual(_nested_value(\"2\"),\n values.select_replica_mirrored(1, result))\n\n def testWrapAListOfTwoTuples(self):\n result = values.regroup([(\"1\", \"2\"), (\"3\", \"4\")])\n self.assertIsInstance(result, tuple)\n self.assertEqual(2, len(result))\n self._is_per_replica(result[0], (\"1\", \"3\"), values.PerReplica)\n self._is_per_replica(result[1], (\"2\", \"4\"), values.PerReplica)\n\n def testMirroredContainer(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n mirrored = _make_mirrored()\n result = values.regroup(mirrored.values)\n self.assertIs(mirrored, result)\n\n def testSameId(self):\n foo = object()\n result = values.regroup(((\"a\", foo), (\"b\", foo)))\n self.assertIsInstance(result, tuple)\n self.assertEqual(2, len(result))\n self._is_per_replica(result[0], [\"a\", \"b\"])\n self.assertIs(foo, result[1])\n\n # Test select_replica(), should undo the merge done by regroup().\n result_0 = values.select_replica(0, result)\n self.assertIsInstance(result_0, tuple)\n self.assertEqual(2, len(result_0))\n self.assertEqual(\"a\", result_0[0])\n self.assertIs(foo, result_0[1])\n result_1 = values.select_replica(1, result)\n self.assertIsInstance(result_1, tuple)\n self.assertEqual(2, len(result_1))\n self.assertEqual(\"b\", result_1[0])\n self.assertIs(foo, result_1[1])\n\n def testOneDevice(self):\n result = values.regroup((_nested_value(\"1\"),))\n # On one device regroup() and select_replica() are basically identity.\n self.assertEqual(_nested_value(\"1\"), result)\n self.assertEqual(_nested_value(\"1\"),\n values.select_replica(0, result))\n\n # The one exception has to do with MirroredVariables.\n d = \"/device:CPU:0\"\n with ops.device(d):\n v = variable_scope.get_variable(\n name=\"v\", 
initializer=1., use_resource=True)\n mirrored = values.MirroredVariable(None, (v,),\n variable_scope.VariableAggregation.SUM)\n result = values.regroup((v,))\n self.assertIs(mirrored, result)\n\n def testNamedTuple(self):\n\n # We include toy implementations of Scaffold and EstimatorSpec to\n # avoid a dependency on Estimator here.\n\n class Scaffold(object):\n pass\n\n class EstimatorSpec(collections.namedtuple(\n \"EstimatorSpec\", [\"mode\", \"loss\", \"train_op\", \"scaffold\"])):\n\n def __new__(cls, mode, loss, train_op, scaffold=None):\n return super(EstimatorSpec, cls).__new__(\n cls, mode=mode, loss=loss, train_op=train_op,\n scaffold=scaffold or Scaffold())\n\n with context.graph_mode(), ops.Graph().as_default():\n created_estimator_specs = []\n\n for device_id in range(3):\n spec = EstimatorSpec(\n mode=mode_keys.EstimatorModeKeys.TRAIN,\n loss=constant_op.constant(device_id / 2),\n train_op=array_ops.identity(constant_op.constant(device_id)))\n created_estimator_specs.append(spec)\n\n merged_estimator_spec = values.regroup(created_estimator_specs)\n\n self.assertIsInstance(merged_estimator_spec, EstimatorSpec)\n self.assertEqual(mode_keys.EstimatorModeKeys.TRAIN,\n merged_estimator_spec.mode)\n for device_id in range(3):\n self.assertEqual(created_estimator_specs[device_id].loss,\n merged_estimator_spec.loss.values[device_id])\n self.assertEqual(created_estimator_specs[device_id].train_op,\n merged_estimator_spec.train_op.values[device_id])\n # Scaffold is populated by `EstimatorSpec.__new__`.\n self.assertEqual(created_estimator_specs[device_id].scaffold,\n merged_estimator_spec.scaffold.values[device_id])\n self.assertIsInstance(created_estimator_specs[device_id].scaffold,\n Scaffold)\n # Also test that we can undo the merge using select_replica()\n self.assertEqual(created_estimator_specs[device_id],\n values.select_replica(device_id,\n merged_estimator_spec))\n\n\nclass MirroredVariableTest(test.TestCase, parameterized.TestCase):\n\n config = config_pb2.ConfigProto()\n config.allow_soft_placement = True\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testProperties(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n mirrored = _make_mirrored()\n v = mirrored.values[0]\n self.assertEqual(v.name, mirrored.name)\n self.assertEqual(v.dtype, mirrored.dtype)\n self.assertEqual(v.shape, mirrored.shape)\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testVariableOnAnotherDevice(self):\n v = variable_scope.get_variable(\n name=\"v\", initializer=[1.], use_resource=True)\n mirrored = values.MirroredVariable(\n None, (v,), variable_scope.VariableAggregation.MEAN)\n\n self.assertEqual(v.name, mirrored.name)\n self.assertEqual(v.dtype, mirrored.dtype)\n self.assertEqual(v.shape, mirrored.shape)\n\n def _assign_mirrored(self, v, new):\n for var, n in zip(v.values, new):\n self.evaluate(var.assign(n))\n\n def _save_return_saver(self, sess, var):\n saver = saver_lib.Saver(var_list=[var])\n test_dir = self.get_temp_dir()\n prefix = os.path.join(test_dir, \"ckpt\")\n return saver.save(sess, prefix), saver\n\n def _save(self, sess, var):\n save_path, _ = self._save_return_saver(sess, var)\n return save_path\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testSaveAndRestoreMirroredOneGraph(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n # Graph mode can work without GPU because the Placer \"moves\" the\n # variable to a CPU. 
In other words, if there is no GPU available, but\n # user requested to create a variable on GPU, Placer will ignore the\n # user request and assign the VarHandleOp to CPU. This requires\n # soft_placement, which is on by default.\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n with self.cached_session(config=self.config) as sess:\n mirrored = _make_mirrored()\n v = mirrored.values\n\n # Overwrite the initial values.\n self._assign_mirrored(mirrored, [3., 4.])\n\n # Saves the current value of v[0], 3.\n save_path, saver = self._save_return_saver(sess, mirrored)\n\n # Change the values between save and restore.\n self._assign_mirrored(mirrored, [5., 6.])\n\n # Restores the saved value of 3. to both variables.\n saver.restore(sess, save_path)\n self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))\n\n def _save_mirrored(self):\n \"\"\"Save variables with mirroring, returns save_path.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n mirrored = _make_mirrored()\n\n # Overwrite the initial values.\n self._assign_mirrored(mirrored, [3., 4.])\n\n # Saves the current value of v[0], 3.\n save_path = self._save(sess, mirrored)\n\n # Change the values between save and restore.\n self._assign_mirrored(mirrored, [5., 6.])\n return save_path\n\n def _save_normal(self):\n \"\"\"Save variables without mirroring, returns save_path.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n var = variable_scope.get_variable(\n name=\"v\", initializer=1., use_resource=True)\n\n # Overwrite the initial value.\n self.evaluate(var.assign(3.))\n\n # Saves the current value of var, 3.\n save_path = self._save(sess, var)\n\n # Change the values between save and restore.\n self.evaluate(var.assign(5.))\n return save_path\n\n def _restore_normal(self, save_path):\n \"\"\"Restore to variables without mirroring in a fresh graph.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n var = variable_scope.get_variable(\n name=\"v\", initializer=7., use_resource=True)\n\n # Overwrite the initial value.\n self.evaluate(var.assign(8.))\n\n # Restores the saved value of 3. to `var`.\n saver = saver_lib.Saver(var_list=[var])\n saver.restore(sess, save_path)\n self.assertEqual(3., self.evaluate(var))\n\n def _restore_mirrored(self, save_path):\n \"\"\"Restore to variables with mirroring in a fresh graph.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n mirrored = _make_mirrored()\n v = mirrored.values\n\n # Overwrite the initial values.\n self._assign_mirrored(mirrored, [7., 8.])\n\n # Restores the saved value of 3. to both variables.\n saver = saver_lib.Saver(var_list=[mirrored])\n saver.restore(sess, save_path)\n self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testSaveMirroredRestoreMirrored(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n # Graph mode can work without GPU because the Placer \"moves\" the\n # variable to a CPU. In other words, if there is no GPU available, but\n # user requested to create a variable on GPU, Placer will ignore the\n # user request and assign the VarHandleOp to CPU. 
This requires\n # soft_placement, which is on by default.\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n save_path = self._save_mirrored()\n self._restore_mirrored(save_path)\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testSaveMirroredRestoreNormal(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n # Graph mode can work without GPU because the Placer \"moves\" the\n # variable to a CPU. In other words, if there is no GPU available, but\n # user requested to create a variable on GPU, Placer will ignore the\n # user request and assign the VarHandleOp to CPU. This requires\n # soft_placement, which is on by default.\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n save_path = self._save_mirrored()\n self._restore_normal(save_path)\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testSaveNormalRestoreMirrored(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n # Graph mode can work without GPU because the Placer \"moves\" the\n # variable to a CPU. In other words, if there is no GPU available, but\n # user requested to create a variable on GPU, Placer will ignore the\n # user request and assign the VarHandleOp to CPU. This requires\n # soft_placement, which is on by default.\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n save_path = self._save_normal()\n self._restore_mirrored(save_path)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_gpu,\n ],\n mode=[\"graph\"]))\n def testFetchAMirroredVariable(self, distribution):\n with self.session(graph=ops.Graph()) as sess, distribution.scope():\n with ops.device(\"/device:GPU:0\"):\n v = variable_scope.get_variable(\n name=\"v\", initializer=1., use_resource=True)\n mirrored = values.MirroredVariable(\n distribution, (v,), variable_scope.VariableAggregation.MEAN)\n sess.run(variables_lib.global_variables_initializer())\n sess.run({\"complicated\": mirrored})\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n ],\n mode=[\"graph\", \"eager\"]))\n def testAssignOutOfScope_mirrored(self, distribution):\n with distribution.scope():\n mirrored = variables_lib.Variable(1.)\n if not isinstance(mirrored, values.MirroredVariable):\n self.assertIsInstance(mirrored, values.TPUMirroredVariable)\n self.evaluate(mirrored.assign(3.))\n self.assertEqual(self.evaluate(mirrored.read_value()), 3.)\n for component in mirrored.values:\n self.assertEqual(self.evaluate(component.read_value()), 3.)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.central_storage_strategy_with_two_gpus\n ],\n mode=[\"graph\", \"eager\"]))\n def testAssignOutOfScope_aggregating(self, distribution):\n with distribution.scope():\n aggregating = variables_lib.Variable(1.)\n self.assertIsInstance(aggregating, values.AggregatingVariable)\n self.evaluate(aggregating.assign(3.))\n self.assertEqual(self.evaluate(aggregating.read_value()), 3.)\n self.assertEqual(self.evaluate(aggregating._v.read_value()), 3.)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n 
strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"graph\", \"eager\"]))\n def testExtendsVariable(self, distribution):\n with distribution.scope():\n v = variables_lib.Variable(1.)\n self.assertIsInstance(v, variables_lib.Variable)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"graph\", \"eager\"]))\n def testCheckpointing(self, distribution):\n with distribution.scope():\n v = variables_lib.Variable(constant_op.constant([1., 2., 3., 4]))\n\n self.evaluate(v.initializer)\n before_save = self.evaluate(v.read_value())\n\n # Save random weights into checkpoint.\n checkpoint = trackable_utils.Checkpoint(v=v)\n prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.test_session():\n save_path = checkpoint.save(prefix)\n\n # Assign inverted value.\n self.evaluate(v.assign(constant_op.constant([4., 3., 2., 1.])))\n after_assign = self.evaluate(v.read_value())\n self.assertNotAllClose(before_save, after_assign)\n\n # Restore from the checkpoint.\n with self.test_session():\n checkpoint.restore(save_path).assert_consumed().run_restore_ops()\n after_restore = self.evaluate(v)\n self.assertAllClose(before_save, after_restore)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"graph\"]))\n def testTraceback(self, distribution):\n with distribution.scope():\n variable_scope.get_variable(\n name=\"testVar\", initializer=1., use_resource=True)\n with self.assertRaisesRegex(\n ValueError, \"Variable testVar already exists\"):\n variable_scope.get_variable(\n name=\"testVar\", initializer=1., use_resource=True)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"eager\"]))\n def testInitializedToSameValueInsideEagerRun(self, distribution):\n v = [None]\n @def_function.function\n def step():\n def f():\n if v[0] is None:\n v[0] = variables_lib.Variable(random_ops.random_normal([]))\n distribution.experimental_run_v2(f)\n\n context.set_global_seed(None)\n step()\n vals = self.evaluate(v[0].values)\n self.assertAllEqual(vals[0], vals[1])\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"graph\", \"eager\"]))\n def testSelectReplica(self, distribution):\n with distribution.scope():\n v = variables_lib.Variable(1.)\n self.assertIs(v, values.select_replica(0, v))\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n mode=[\"graph\", \"eager\"]))\n def testModAfterAssign(self, distribution):\n with 
distribution.scope():\n v = variables_lib.Variable(0)\n def replica_fn():\n def merge_fn(_):\n return math_ops.mod(v.assign_add(1), 2)\n return distribution_strategy_context.get_replica_context().merge_call(\n merge_fn)\n\n @def_function.function\n def foo():\n distribution.experimental_run_v2(replica_fn)\n\n foo()\n\n\n_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)\n\n\ndef _make_replica_local(method, strategy=None):\n if strategy is None:\n devices = (\"/device:GPU:0\", \"/device:CPU:0\")\n else:\n devices = strategy.extended.worker_devices\n\n v = []\n for d, n, init in zip(devices, [\"v\", \"v/replica\"], [1., 2.]):\n with ops.device(d):\n v.append(variable_scope.get_variable(\n name=n, initializer=init, use_resource=True))\n\n if (strategy is not None) and isinstance(strategy, _TPU_STRATEGIES):\n var_cls = values.TPUSyncOnReadVariable\n else:\n var_cls = values.SyncOnReadVariable\n replica_local = var_cls(strategy, v, method)\n return v, replica_local\n\n\nclass SyncOnReadVariablePropertiesTest(test.TestCase):\n\n config = config_pb2.ConfigProto()\n config.allow_soft_placement = True\n\n @test_util.run_in_graph_and_eager_modes(config=config)\n def testProperties(self):\n if context.num_gpus() < 1 and context.executing_eagerly():\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.SUM)\n\n self.assertEqual(v[0].name, replica_local.name)\n self.assertEqual(v[0].dtype, replica_local.dtype)\n self.assertEqual(v[0].shape, replica_local.shape)\n self.assertEqual(variable_scope.VariableAggregation.SUM,\n replica_local.aggregation)\n\n def testTensorConversion(self):\n with context.graph_mode():\n _, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.SUM)\n converted = ops.convert_to_tensor(replica_local, as_ref=False)\n self.assertIsInstance(converted, ops.Tensor)\n self.assertEqual(converted.dtype, replica_local.dtype)\n\n converted = ops.convert_to_tensor(replica_local, as_ref=True)\n # Resources variable are converted to tensors as well when as_ref is True.\n self.assertIsInstance(converted, ops.Tensor)\n self.assertEqual(converted.dtype, replica_local.dtype)\n\n @test_util.run_v2_only\n def testCanPassToDefFun(self):\n @def_function.function\n def add1(x):\n return x + 1\n\n v = variable_scope.get_variable(\n name=\"v\", initializer=[1.], use_resource=True)\n replica_local = values.SyncOnReadVariable(\n None, (v,), variable_scope.VariableAggregation.MEAN)\n self.assertEqual(2., self.evaluate(add1(replica_local)))\n\n\ndef mirrored_and_tpu_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n ],\n mode=[\"graph\", \"eager\"])\n\n\ndef strategy_and_run_tf_function_combinations():\n # Test the combination of different strategies and whether a tf.function\n # is passed into strategy.experimental_run_v2.\"\"\"\n return combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n ],\n mode=[\"graph\", \"eager\"],\n experimental_run_tf_function=[True, False]) + combinations.combine(\n distribution=[\n strategy_combinations.tpu_strategy,\n ],\n mode=[\"graph\", \"eager\"],\n experimental_run_tf_function=[True])\n\n\nclass SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):\n\n def _assign_replica_local(self, v, new):\n for var, n in zip(v, new):\n with ops.device(var.device):\n 
self.evaluate(var.assign(n))\n\n def _save_return_saver(self, sess, var):\n saver = saver_lib.Saver(var_list=[var])\n test_dir = self.get_temp_dir()\n prefix = os.path.join(test_dir, \"ckpt\")\n return saver.save(sess, prefix), saver\n\n def _save(self, sess, var):\n save_path, _ = self._save_return_saver(sess, var)\n return save_path\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution):\n with self.cached_session() as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.SUM, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [3., 4.])\n\n with distribution.scope():\n # Saves the current value of v[0] + v[1], 7.\n save_path, saver = self._save_return_saver(sess, replica_local)\n\n # Change the values between save and restore.\n self._assign_replica_local(v, [5., 6.])\n\n # Restores the saved value of 7. which gets divided equally\n # between the variables.\n saver.restore(sess, save_path)\n self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution):\n if context.num_gpus() < 1 and context.executing_eagerly():\n self.skipTest(\"A GPU is not available for this test in eager mode.\")\n\n with self.cached_session() as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.MEAN, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [3., 4.])\n\n with distribution.scope():\n # Saves the current value of (v[0] + v[1])/2, 3.5.\n save_path, saver = self._save_return_saver(sess, replica_local)\n\n # Change the values between save and restore.\n self._assign_replica_local(v, [5., 6.])\n\n # Restores the saved value of 3.5 to both variables.\n saver.restore(sess, save_path)\n self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))\n\n def _save_replica_local_mean(self, distribution):\n \"\"\"Save variables with mirroring, returns save_path.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.MEAN, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [3., 4.])\n\n with distribution.scope():\n # Saves the current value of (v[0] + v[1])/2, 3.5\n save_path = self._save(sess, replica_local)\n\n # Change the values between save and restore.\n self._assign_replica_local(v, [5., 6.])\n return save_path\n\n def _save_replica_local_sum(self, distribution):\n \"\"\"Save variables with mirroring, returns save_path.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.SUM, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [1.5, 2.])\n\n with distribution.scope():\n # Saves the current value of v[0] + v[1], 3.5\n save_path = self._save(sess, replica_local)\n\n # Change the values between save and restore.\n self._assign_replica_local(v, [5., 6.])\n return save_path\n\n def _save_normal(self):\n \"\"\"Save variables without mirroring, returns save_path.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n var = variable_scope.get_variable(\n name=\"v\", initializer=1., use_resource=True)\n\n # Overwrite the initial value.\n self.evaluate(var.assign(3.5))\n\n # Saves the current value of var, 3.5.\n save_path = self._save(sess, var)\n\n # Change the values 
between save and restore.\n self.evaluate(var.assign(5.))\n return save_path\n\n def _restore_normal(self, save_path):\n \"\"\"Restore to variables without mirroring in a fresh graph.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n var = variable_scope.get_variable(\n name=\"v\", initializer=7., use_resource=True)\n\n # Overwrite the initial value.\n self.evaluate(var.assign(8.))\n\n # Restores the saved value of 3.5 to `var`.\n saver = saver_lib.Saver(var_list=[var])\n saver.restore(sess, save_path)\n self.assertEqual(3.5, self.evaluate(var))\n\n def _restore_replica_local_mean(self, save_path, distribution):\n \"\"\"Restore to variables with mirroring in a fresh graph.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.MEAN, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [7., 8.])\n\n with distribution.scope():\n # Restores the saved value of 3.5 to both variables.\n saver = saver_lib.Saver(var_list=[replica_local])\n saver.restore(sess, save_path)\n self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))\n\n def _restore_replica_local_sum(self, save_path, distribution):\n \"\"\"Restore to variables with mirroring in a fresh graph.\"\"\"\n with self.session(graph=ops.Graph()) as sess:\n v, replica_local = _make_replica_local(\n variable_scope.VariableAggregation.SUM, distribution)\n\n # Overwrite the initial values.\n self._assign_replica_local(v, [7., 8.])\n\n with distribution.scope():\n # Restores the saved value of 3.5 to both variables.\n saver = saver_lib.Saver(var_list=[replica_local])\n saver.restore(sess, save_path)\n self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]]))\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution):\n save_path = self._save_replica_local_mean(distribution)\n self._restore_replica_local_mean(save_path, distribution)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution):\n save_path = self._save_replica_local_sum(distribution)\n self._restore_replica_local_sum(save_path, distribution)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveReplicaLocalMeanRestoreNormal(self, distribution):\n save_path = self._save_replica_local_mean(distribution)\n self._restore_normal(save_path)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveReplicaLocalSumRestoreNormal(self, distribution):\n save_path = self._save_replica_local_sum(distribution)\n self._restore_normal(save_path)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveNormalRestoreReplicaLocalMean(self, distribution):\n save_path = self._save_normal()\n self._restore_replica_local_mean(save_path, distribution)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testSaveNormalRestoreReplicaLocalSum(self, distribution):\n save_path = self._save_normal()\n self._restore_replica_local_sum(save_path, distribution)\n\n @combinations.generate(strategy_and_run_tf_function_combinations())\n def testAssign(self, distribution, experimental_run_tf_function):\n\n def assign(fn, v, update_value, cross_replica):\n update_fn = lambda: getattr(v, fn)(update_value)\n if cross_replica:\n return update_fn()\n else:\n if experimental_run_tf_function:\n update_fn = def_function.function(update_fn)\n return 
distribution.experimental_local_results(\n distribution.experimental_run_v2(update_fn))\n updates = [(\"assign\", 1.), (\"assign_add\", 1.), (\"assign_sub\", -1.)]\n aggregations = [\n variables_lib.VariableAggregation.NONE,\n variables_lib.VariableAggregation.SUM,\n variables_lib.VariableAggregation.MEAN,\n variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,\n ]\n options = ( # VariableAggregation.SUM in cross-replica mode is tested below\n [x for x in itertools.product(updates, aggregations, [True, False])\n if not(x[1] == variables_lib.VariableAggregation.SUM and x[2])])\n for update, aggregation, cross_replica in options:\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n self.evaluate(variables_lib.global_variables_initializer())\n fn, update_value = update\n self.evaluate(assign(fn, v, update_value, cross_replica))\n for component in v._values:\n self.assertAllEqual(self.evaluate(component.read_value()),\n self.evaluate(array_ops.ones_like(component)))\n\n @combinations.generate(strategy_and_run_tf_function_combinations())\n def testAssignDtypeConversion(self, distribution,\n experimental_run_tf_function):\n\n def assign(fn, v, update_value, cross_replica):\n update_fn = lambda: getattr(v, fn)(update_value)\n if cross_replica:\n return update_fn()\n else:\n if experimental_run_tf_function:\n update_fn = def_function.function(update_fn)\n return distribution.experimental_local_results(\n distribution.experimental_run_v2(update_fn))\n updates = [(\"assign\", 1), (\"assign_add\", 1), (\"assign_sub\", -1)]\n aggregations = [\n variables_lib.VariableAggregation.NONE,\n variables_lib.VariableAggregation.SUM,\n variables_lib.VariableAggregation.MEAN,\n variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,\n ]\n options = ( # VariableAggregation.SUM in cross-replica mode is tested below\n [x for x in itertools.product(updates, aggregations, [True, False])\n if not(x[1] == variables_lib.VariableAggregation.SUM and x[2])])\n for update, aggregation, cross_replica in options:\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n self.evaluate(variables_lib.global_variables_initializer())\n fn, update_value = update\n self.evaluate(assign(fn, v, update_value, cross_replica))\n for component in v._values:\n self.assertAllEqual(self.evaluate(component.read_value()),\n self.evaluate(array_ops.ones_like(component)))\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testAssignWithAggregationSum(self, distribution):\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=variables_lib.VariableAggregation.SUM)\n self.evaluate(variables_lib.global_variables_initializer())\n self.evaluate(v.assign(1. 
* distribution.num_replicas_in_sync))\n for component in v._values:\n self.assertAllEqual(self.evaluate(component.read_value()),\n self.evaluate(array_ops.ones_like(component)))\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testAssignAddSubWithAggregationSum(self, distribution):\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=variables_lib.VariableAggregation.SUM)\n self.evaluate(variables_lib.global_variables_initializer())\n with self.assertRaisesRegex(\n ValueError, \"SyncOnReadVariable does not support \"):\n self.evaluate(v.assign_add(1.))\n with self.assertRaisesRegex(\n ValueError, \"SyncOnReadVariable does not support \"):\n self.evaluate(v.assign_sub(1.))\n\n @combinations.generate(strategy_and_run_tf_function_combinations())\n def testReadValueInReplicaContext(self, distribution,\n experimental_run_tf_function):\n aggregations = [\n variables_lib.VariableAggregation.NONE,\n variables_lib.VariableAggregation.SUM,\n variables_lib.VariableAggregation.MEAN,\n variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,\n ]\n for aggregation in aggregations:\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n self.evaluate(variables_lib.global_variables_initializer())\n if experimental_run_tf_function:\n read_var_fn = def_function.function(v.read_value)\n else:\n read_var_fn = v.read_value\n results = self.evaluate(\n distribution.experimental_local_results(\n distribution.experimental_run_v2(read_var_fn)))\n for component, value in zip(v._values, results):\n self.assertAllEqual(self.evaluate(component.read_value()), value)\n\n @combinations.generate(strategy_and_run_tf_function_combinations())\n def testReadValueInCrossReplicaContext(self, distribution,\n experimental_run_tf_function):\n aggregations = [\n variables_lib.VariableAggregation.SUM,\n variables_lib.VariableAggregation.MEAN,\n variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,\n ]\n for aggregation in aggregations:\n if isinstance(distribution, _TPU_STRATEGIES):\n resolver = tpu_cluster_resolver.TPUClusterResolver('')\n tpu_strategy_util.initialize_tpu_system(resolver)\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n self.evaluate(variables_lib.global_variables_initializer())\n\n def assign(v=v):\n ctx = distribution_strategy_context.get_replica_context()\n replica_id = ctx.replica_id_in_sync_group\n return v.assign(math_ops.cast(replica_id, dtypes.float32))\n\n if experimental_run_tf_function:\n assign = def_function.function(assign)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.experimental_run_v2(assign)))\n result = self.evaluate(v.read_value())\n num_replicas = distribution.num_replicas_in_sync\n sum_of_replica_values = num_replicas * (num_replicas - 1) / 2.\n if aggregation == variables_lib.VariableAggregation.SUM:\n expected = sum_of_replica_values\n elif aggregation == variables_lib.VariableAggregation.MEAN:\n expected = sum_of_replica_values / num_replicas\n else:\n expected = 0\n self.assertEqual(expected, result, aggregation)\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):\n with distribution.scope():\n v = variable_scope.variable(\n 0.,\n 
synchronization=variables_lib.VariableSynchronization.ON_READ,\n aggregation=variables_lib.VariableAggregation.NONE)\n self.evaluate(variables_lib.global_variables_initializer())\n with self.assertRaisesRegex(\n ValueError, \"Could not convert from .* VariableAggregation\\\\.NONE\"):\n self.evaluate(v.read_value())\n\n @combinations.generate(mirrored_and_tpu_strategy_combinations())\n def testInitializedToSameValueInsideEagerRun(self, distribution):\n if not context.executing_eagerly(): self.skipTest(\"eager only\")\n\n v = [None]\n @def_function.function\n def step():\n def f():\n if v[0] is None:\n v[0] = variables_lib.Variable(\n random_ops.random_normal([]),\n synchronization=variables_lib.VariableSynchronization.ON_READ)\n distribution.experimental_run_v2(f)\n\n context.set_global_seed(None)\n step()\n vals = self.evaluate(v[0].values)\n self.assertAllEqual(vals[0], vals[1])\n\n\nclass MirroredTest(test.TestCase):\n\n def testAddOp(self):\n if context.num_gpus() < 1:\n self.skipTest(\"A GPU is not available for this test.\")\n mirrored_val = _make_mirrored_val(init_val=3.)\n\n self.assertEqual(self.evaluate(constant_op.constant(6.)),\n self.evaluate(mirrored_val + mirrored_val))\n self.assertEqual(self.evaluate(constant_op.constant(4.)),\n self.evaluate(mirrored_val + 1))\n self.assertEqual(self.evaluate(mirrored_val + 1),\n self.evaluate(math_ops.add(mirrored_val, 1)))\n self.assertEqual(type(mirrored_val + 1),\n type(math_ops.add(mirrored_val, 1)))\n\n\nclass PerReplicaTest(test.TestCase, parameterized.TestCase):\n\n def testTypeSpec(self):\n vals = (constant_op.constant(1.),)\n per_replica = values.PerReplica(vals)\n\n spec = per_replica._type_spec\n self.assertEqual(spec._value_specs,\n (tensor_spec.TensorSpec([], dtypes.float32),))\n\n def testTypeSpecRoundTrip(self):\n vals = (constant_op.constant(1.),)\n per_replica = values.PerReplica(vals)\n\n spec = per_replica._type_spec\n tensor_list = spec._to_components(per_replica)\n reconstructed = spec._from_components(tensor_list)\n\n self.assertAllEqual(per_replica.values, reconstructed.values)\n\n def testTypeSpecNest(self):\n vals = (constant_op.constant(1.), constant_op.constant([5., 6.0]),)\n per_replica = values.PerReplica(vals)\n\n # Note: nest.map_structure exercises nest.flatten and\n # nest.pack_sequence_as.\n result = nest.map_structure(\n lambda t: t + 10, per_replica, expand_composites=True)\n\n self.assertLen(result.values, 2)\n self.assertAllEqual(result.values[0], 11.)\n self.assertAllEqual(result.values[1], [15., 16.0])\n\n @test_util.run_in_graph_and_eager_modes\n def testIsGraphTensor(self):\n per_replica = values.PerReplica((constant_op.constant(1.),))\n for t in nest.flatten(per_replica, expand_composites=True):\n self.assertEqual(hasattr(t, \"graph\"), not context.executing_eagerly())\n\n def testDoesNotTriggerFunctionTracing(self):\n traces = []\n\n @def_function.function\n def f(x):\n traces.append(None) # Only happens on trace.\n return x\n\n per_replica = values.PerReplica((constant_op.constant(1.),))\n\n # Trace once.\n f(per_replica)\n self.assertNotEmpty(traces)\n del traces[:]\n\n per_replica_spec = per_replica._type_spec\n for _ in range(5):\n vals = per_replica_spec._to_components(per_replica)\n vals = [v * 2 for v in vals]\n per_replica = per_replica_spec._from_components(vals)\n\n output = f(per_replica)\n self.assertIsInstance(output, values.PerReplica)\n self.assertAllEqual(output._values, per_replica._values)\n self.assertEmpty(traces) # Make sure we're not re-tracing `f`.\n\n def 
testFunctionCanReturnPerReplica(self):\n f = def_function.function(lambda x: x)\n x = values.PerReplica((constant_op.constant(1.),))\n y = f(x)\n self.assertIsNot(x, y)\n nest.map_structure(self.assertAllEqual, x, y, expand_composites=True)\n self.assertEqual(x._type_spec, y._type_spec)\n\n @test_util.run_in_graph_and_eager_modes\n def testCondWithTensorValues(self):\n per_replica_1 = values.PerReplica((constant_op.constant(\"a\"),))\n per_replica_2 = values.PerReplica((constant_op.constant([\"b\", \"c\"]),))\n condition = array_ops.placeholder_with_default(True, [])\n\n result = control_flow_ops.cond(\n condition, lambda: per_replica_1, lambda: per_replica_2)\n\n self.assertLen(result.values, 1)\n self.assertAllEqual(result.values[0], \"a\")\n\n @test_util.run_in_graph_and_eager_modes\n def testCondWithValuesConvertibleToTensor(self):\n per_replica_1 = values.PerReplica((\"a\",))\n per_replica_2 = values.PerReplica((\"b\",))\n condition = array_ops.placeholder_with_default(True, [])\n\n result = control_flow_ops.cond(\n condition, lambda: per_replica_1, lambda: per_replica_2)\n\n self.assertLen(result.values, 1)\n self.assertAllEqual(result.values[0], \"a\")\n\n @test_util.build_as_function_and_v1_graph\n def testCondWithValuesNotConvertibleToTensor(self):\n per_replica_1 = values.PerReplica(({\"a\"},))\n per_replica_2 = values.PerReplica(({\"b\", \"c\"},))\n condition = array_ops.placeholder(dtypes.bool, [])\n\n with self.assertRaisesRegex(TypeError, \"Could not build a TypeSpec for\"):\n control_flow_ops.cond(\n condition, lambda: per_replica_1, lambda: per_replica_2)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
]
| [
[
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.distribute.values.MirroredVariable",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.distribute.distribute_lib.ReplicaContext",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.eager.test.main",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.distribute.values.DistributedValues",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.eager.context.set_global_seed",
"tensorflow.python.distribute.values.select_replica",
"tensorflow.python.distribute.values.regroup",
"tensorflow.python.distribute.values.select_replica_mirrored",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.distribute.values.DistributedDelegate",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.distribute.values.SyncOnReadVariable",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.distribute.values.Mirrored"
]
]
|
alexandonian/paint-by-word | [
"40213a597f4ecbc8cf95abe5a6cb856dda01baef"
]
| [
"paintbyword/utils/dissect.py"
]
| [
"import torch\nimport re\nimport copy\nimport numpy\nfrom torch.utils.data.dataloader import default_collate\nfrom netdissect import nethook, imgviz, tally, unravelconv, upsample\n\n\ndef acts_image(model, dataset,\n layer=None, unit=None,\n thumbsize=None,\n cachedir=None,\n return_as='strip', # or individual, or tensor\n k=100, r=4096, q=0.01,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n assert return_as in ['strip', 'individual', 'tensor']\n topk, rq, run = acts_stats(model, dataset, layer=layer, unit=unit,\n k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,\n sample_size=sample_size, cachedir=cachedir)\n result = window_images(dataset, topk, rq, run,\n thumbsize=thumbsize, return_as=return_as, k=k, q=q,\n cachedir=cachedir)\n if unit is not None and not hasattr(unit, '__len__'):\n result = result[0]\n return result\n\n\ndef grad_image(model, dataset,\n layer=None, unit=None,\n thumbsize=None,\n cachedir=None,\n return_as='strip', # or individual, or tensor\n k=100, r=4096, q=0.01,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n assert return_as in ['strip', 'individual', 'tensor']\n topk, botk, rq, run = grad_stats(model, dataset, layer=layer, unit=unit,\n k=max(200, k), r=r,\n batch_size=batch_size, num_workers=num_workers,\n sample_size=sample_size, cachedir=cachedir)\n result = window_images(dataset, topk, rq, run,\n thumbsize=thumbsize, return_as=return_as, k=k, q=q,\n cachedir=cachedir)\n if unit is not None and not hasattr(unit, '__len__'):\n result = result[0]\n return result\n\n\ndef update_image(model, dataset,\n layer=None, unit=None,\n thumbsize=None,\n cachedir=None,\n return_as='strip', # or individual, or tensor\n k=100, r=4096, q=0.01,\n cinv=None,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n assert return_as in ['strip', 'individual', 'tensor']\n topk, botk, rq, run = update_stats(model, dataset, layer=layer, unit=unit,\n k=max(200, k), r=r, cinv=cinv,\n batch_size=batch_size, num_workers=num_workers,\n sample_size=sample_size, cachedir=cachedir)\n result = window_images(dataset, topk, rq, run,\n thumbsize=thumbsize, return_as=return_as, k=k, q=q,\n cachedir=cachedir)\n if unit is not None and not hasattr(unit, '__len__'):\n result = result[0]\n return result\n\n\ndef proj_image(model, dataset,\n layer=None, unit=None,\n thumbsize=None,\n cachedir=None,\n return_as='strip', # or individual, or tensor\n k=100, r=4096, q=0.01,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n assert return_as in ['strip', 'individual', 'tensor']\n topk, botk, rq, run = proj_stats(model, dataset, layer=layer, unit=unit,\n k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,\n sample_size=sample_size, cachedir=cachedir)\n result = window_images(dataset, topk, rq, run,\n thumbsize=thumbsize, return_as=return_as, k=k, q=q,\n cachedir=cachedir)\n if unit is not None and not hasattr(unit, '__len__'):\n result = result[0]\n return result\n\n\ndef acts_stats(model, dataset,\n layer=None, unit=None,\n cachedir=None,\n k=100, r=4096,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n assert not model.training\n if unit is not None:\n if not hasattr(unit, '__len__'):\n unit = [unit]\n assert unit is None or len(unit) > 0\n if layer is not None:\n module = nethook.get_module(model, layer)\n else:\n module = model\n device = next(model.parameters()).device\n pin_memory = (device.type != 'cpu')\n\n def run(x, *args):\n with nethook.Trace(module, stop=True) as ret, torch.no_grad():\n model(x.to(device))\n r = 
ret.output\n if unit is not None:\n r = r[:, unit]\n return r\n run.name = 'acts'\n\n def compute_samples(batch, *args):\n r = run(batch)\n flat_r = r.view(r.shape[0], r.shape[1], -1)\n top_r = flat_r.max(2)[0]\n all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])\n return top_r, all_r\n topk, rq = tally.tally_topk_and_quantile(\n compute_samples, dataset, k=k, r=r,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/acts_topk_rq.npz' if cachedir else None)\n return topk, rq, run\n\n\ndef grad_stats(model, dataset, layer,\n unit=None,\n cachedir=None,\n k=100, r=4096,\n batch_size=10,\n sample_size=None,\n num_workers=30,\n ):\n assert not model.training\n if unit is not None:\n if not hasattr(unit, '__len__'):\n unit = [unit]\n assert unit is None or len(unit) > 0\n # Make a copy so we can disable grad on parameters\n cloned_model = copy.deepcopy(model)\n nethook.set_requires_grad(False, cloned_model)\n if layer is not None:\n module = nethook.get_module(cloned_model, layer)\n else:\n module = cloned_model\n device = next(cloned_model.parameters()).device\n pin_memory = (device.type != 'cpu')\n\n def run(x, y, *args):\n with nethook.Trace(module, retain_grad=True) as ret, (\n torch.enable_grad()):\n out = cloned_model(x.to(device))\n r = ret.output\n loss = torch.nn.functional.cross_entropy(out, y.to(device))\n loss.backward()\n r = -r.grad\n if unit is not None:\n r = r[:, unit]\n return r\n run.name = 'grad'\n\n def compute_samples(x, y, *args):\n r = run(x, y)\n flat_r = r.view(r.shape[0], r.shape[1], -1)\n top_r = flat_r.max(2)[0]\n bot_r = flat_r.min(2)[0]\n all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])\n return top_r, bot_r, all_r\n topk, botk, rq = tally.tally_extremek_and_quantile(\n compute_samples, dataset, k=k, r=r,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/grad_exk_rq.npz' if cachedir else None)\n return topk, botk, rq, run\n\n\ndef weight_grad(model, dataset, layer,\n unit=None,\n cachedir=None,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n # Make a copy so we can disable grad on parameters\n cloned_model = copy.deepcopy(model)\n nethook.set_requires_grad(False, cloned_model)\n module = nethook.get_module(cloned_model, layer)\n nethook.set_requires_grad(True, module)\n device = next(cloned_model.parameters()).device\n pin_memory = (device.type != 'cpu')\n\n def accumulate_grad(x, y, *args):\n with torch.enable_grad():\n out = cloned_model(x.to(device))\n loss = torch.nn.functional.cross_entropy(out, y.to(device))\n loss.backward()\n\n def weight_grad():\n return dict(wgrad=module.weight.grad)\n module.weight.grad = None\n wg = tally.tally_each(accumulate_grad, dataset, summarize=weight_grad,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/weight_grad.npz' if cachedir else None)['wgrad']\n return wg\n\n\ndef update_stats(model, dataset, layer,\n unit=None,\n cachedir=None,\n k=100, r=4096,\n batch_size=10,\n cinv=None,\n sample_size=None,\n num_workers=30,\n ):\n assert not model.training\n if unit is not None:\n if not hasattr(unit, '__len__'):\n unit = [unit]\n assert unit is None or len(unit) > 0\n # get weight grad (assumes layer has a weight param)\n wg = weight_grad(model, dataset, layer,\n cachedir=cachedir,\n batch_size=batch_size,\n sample_size=sample_size,\n num_workers=num_workers)\n if cinv is not None:\n wg 
= torch.mm(wg.view(-1,\n cinv.shape[0]).cpu(),\n cinv.cpu()).view(wg.shape)\n # copy the model so we can change its weights.\n cloned_model = copy.deepcopy(model)\n nethook.set_requires_grad(False, cloned_model)\n module = nethook.get_module(cloned_model, layer)\n device = next(cloned_model.parameters()).device\n pin_memory = (device.type != 'cpu')\n with torch.no_grad():\n module.weight[...] = -wg.to(device)\n if hasattr(module, 'bias') and module.bias is not None:\n module.bias[...] = 0\n\n def run(x, *args):\n with nethook.Trace(module, stop=True) as ret, torch.no_grad():\n cloned_model(x.to(device))\n r = ret.output\n if unit is not None:\n r = r[:, unit]\n return r\n run.name = 'update' if cinv is None else 'proj'\n\n def compute_samples(batch, *args):\n r = run(batch)\n flat_r = r.view(r.shape[0], r.shape[1], -1)\n top_r = flat_r.max(2)[0]\n bot_r = flat_r.min(2)[0]\n all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])\n return top_r, bot_r, all_r\n topk, botk, rq = tally.tally_extremek_and_quantile(\n compute_samples, dataset, k=k, r=r,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)\n return topk, botk, rq, run\n\n\ndef proj_c2m(model, dataset, layer,\n cachedir=None,\n batch_size=10,\n sample_size=None,\n num_workers=30,\n ):\n assert not model.training\n device = next(model.parameters()).device\n pin_memory = (device.type != 'cpu')\n cloned_model = copy.deepcopy(model)\n module = nethook.get_module(cloned_model, layer)\n assert isinstance(module, torch.nn.Conv2d)\n nethook.set_requires_grad(False, cloned_model)\n unraveled = unravelconv.unravel_left_conv2d(module)\n unraveled.wconv.weight.requires_grad = True\n unraveled.wconv.weight.grad = None\n nethook.replace_module(cloned_model, layer, unraveled)\n tconv = unraveled.tconv\n\n def ex_run(x, *args):\n with nethook.Trace(tconv, stop=True) as unrav:\n cloned_model(x.to(device))\n return unrav.output\n\n def ex_sample(x, *args):\n r = ex_run(x, *args)\n return r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])\n c2m = tally.tally_second_moment(ex_sample,\n dataset,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)\n return c2m, ex_run\n\n\ndef proj_stats(model, dataset, layer,\n unit=None,\n cachedir=None,\n k=100, r=4096,\n batch_size=10,\n sample_size=None,\n num_workers=30,\n ):\n c2m, ex_run = proj_c2m(model, dataset, layer,\n batch_size=batch_size, sample_size=sample_size,\n cachedir=cachedir)\n # old obsolete method - not stable.\n # Cinv = c2m.momentPSD().cholesky_inverse()\n moment = c2m.moment()\n # TODO: consider uncommenting the following, which uses\n # correlation for a better-conditioned inverse.\n # Change 2.0 to 3.0 to reduce amplifying near-zero feats.\n # rn = moment.diag().clamp(1e-30).pow(-1/2.0)\n # moment = moment * rn[None,:] * rn[:,None]\n # The following is standard regularization, to try.\n # moment.diagonal.add_(1e-3)\n Cinv = moment.pinverse()\n\n return update_stats(model, dataset, layer, unit=unit,\n cinv=Cinv,\n k=k, r=r, batch_size=batch_size, sample_size=sample_size,\n cachedir=cachedir)\n\n\ndef window_images(dataset, topk, rq, run,\n thumbsize=None,\n return_as='strip', # or individual, or tensor\n k=None, q=0.01,\n border_color=None,\n vizname=None,\n cachedir=None):\n assert return_as in ['strip', 'individual', 'tensor']\n input_sample = 
default_collate([dataset[0]])\n r_sample = run(*input_sample)\n x_size = tuple(input_sample[0].shape[2:])\n if thumbsize is None:\n thumbsize = x_size\n if not isinstance(thumbsize, (list, tuple)):\n thumbsize = (thumbsize, thumbsize)\n if topk is None:\n topk = tally.range_topk(r_sample.size(1), size=(k or 1))\n default_vizname = 'top' if topk.largest else 'bot'\n if border_color in ['red', 'green', 'yellow']:\n default_vizname += border_color\n border_color = dict(red=[255.0, 0.0, 0.0], green=[0.0, 255.0, 0.0],\n yellow=[255.0, 255.0, 0.0])[border_color]\n if vizname is None:\n vizname = default_vizname\n iv = imgviz.ImageVisualizer(\n thumbsize, image_size=x_size, source=dataset,\n level=rq.quantiles((1.0 - q) if topk.largest else q))\n func = dict(\n strip=iv.masked_images_for_topk,\n individual=iv.individual_masked_images_for_topk,\n tensor=iv.masked_image_grid_for_topk)[return_as]\n acts_images = func(run, dataset, topk, k=k, largest=topk.largest,\n border_color=border_color,\n cachefile=f'{cachedir}/{vizname}{k or \"\"}images.npz' if cachedir else None)\n return acts_images\n\n\ndef label_stats(dataset_with_seg, num_seglabels,\n run, level, upfn=None,\n negate=False,\n cachedir=None,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n # Create upfn\n data_sample = default_collate([dataset_with_seg[0]])\n input_sample = data_sample[:-2] + data_sample[-1:]\n seg_sample = data_sample[-2]\n r_sample = run(*input_sample)\n r_size = tuple(r_sample.shape[2:])\n seg_size = tuple(seg_sample.shape[2:])\n device = r_sample.device\n pin_memory = (device.type != 'cpu')\n if upfn is None:\n upfn = upsample.upsampler(seg_size, r_size)\n\n def compute_concept_pair(batch, seg, *args):\n seg = seg.to(device)\n acts = run(batch, *args)\n hacts = upfn(acts)\n iacts = (hacts < level if negate else hacts > level) # indicator\n iseg = torch.zeros(seg.shape[0], num_seglabels,\n seg.shape[2], seg.shape[3],\n dtype=torch.bool, device=seg.device)\n iseg.scatter_(dim=1, index=seg, value=1)\n flat_segs = iseg.permute(0, 2, 3, 1).reshape(-1, iseg.shape[1])\n flat_acts = iacts.permute(0, 2, 3, 1).reshape(-1, iacts.shape[1])\n return flat_segs, flat_acts\n neg = 'neg' if negate else ''\n iu99 = tally.tally_all_intersection_and_union(\n compute_concept_pair,\n dataset_with_seg,\n sample_size=sample_size,\n num_workers=num_workers, pin_memory=pin_memory,\n cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)\n return iu99\n\n\ndef topk_label_stats(dataset_with_seg, num_seglabels,\n run, level, topk, k=None,\n upfn=None,\n negate=False,\n cachedir=None,\n batch_size=10,\n sample_size=None,\n num_workers=30):\n # Create upfn\n data_sample = default_collate([dataset_with_seg[0]])\n input_sample = data_sample[:-2] + data_sample[-1:]\n seg_sample = data_sample[-2]\n r_sample = run(*input_sample)\n r_size = tuple(r_sample.shape[2:])\n seg_size = tuple(seg_sample.shape[2:])\n device = r_sample.device\n num_units = r_sample.shape[1]\n pin_memory = (device.type != 'cpu')\n if upfn is None:\n upfn = upsample.upsampler(seg_size, r_size)\n intersections = torch.zeros(num_units, num_seglabels).to(device)\n unions = torch.zeros(num_units, num_seglabels).to(device)\n\n def collate_unit_iou(units, imgs, seg, labels):\n seg = seg.to(device)\n acts = run(imgs, labels)\n hacts = upfn(acts)\n iacts = (hacts > level) # indicator\n iseg = torch.zeros(seg.shape[0], num_seglabels,\n seg.shape[2], seg.shape[3],\n dtype=torch.bool, device=seg.device)\n iseg.scatter_(dim=1, index=seg, value=1)\n for i in 
range(len(imgs)):\n ulist = units[i]\n for unit, _ in ulist:\n im_i = (iacts[i, unit][None] & iseg[i]).view(\n num_seglabels, -1).float().sum(1)\n im_u = (iacts[i, unit][None] | iseg[i]).view(\n num_seglabels, -1).float().sum(1)\n intersections[unit] += im_i\n unions[unit] += im_u\n return []\n tally.gather_topk(collate_unit_iou, dataset_with_seg, topk, k=100)\n return intersections / (unions + 1e-20)\n\n### Experiment below - find the best representative with gradient in the consensus direction.\n# 1. Tally weight grad over the dataset.\n# 2. For each unit, find the topk images with gradients in the same direction as this\n# consensus weight grad.\n\ndef wgrad_stats(model, dataset, layer, cachedir=None,\n k=100, r=4096,\n batch_size=10,\n sample_size=None,\n num_workers=30,\n ):\n assert not model.training\n if layer is not None:\n module = nethook.get_module(model, layer)\n else:\n module = model\n device = next(model.parameters()).device\n pin_memory = (device.type != 'cpu')\n\n cloned_model = copy.deepcopy(model)\n nethook.set_requires_grad(False, cloned_model)\n module = nethook.get_module(cloned_model, layer)\n module.weight.requires_grad = True\n module.weight.grad = None\n\n wg = weight_grad(model, dataset, layer,\n cachedir=cachedir,\n batch_size=batch_size,\n sample_size=sample_size,\n num_workers=num_workers)\n wg = wg.to(device)\n\n module.weight.requires_grad = False\n ks = module.kernel_size\n unfolder = torch.nn.Conv2d(\n in_channels=module.in_channels, out_channels=module.out_channels,\n kernel_size=ks, padding=module.padding,\n dilation=module.dilation, stride=module.stride,\n bias=False)\n nethook.set_requires_grad(False, unfolder)\n unfolder.to(device)\n unfolder.weight[...] = wg\n\n def run(x, y, *args, return_details=False):\n with nethook.Trace(module, retain_grad=True, retain_input=True) as ret, (\n torch.enable_grad()):\n out = cloned_model(x.to(device))\n r = ret.output\n inp = ret.input\n loss = torch.nn.functional.cross_entropy(out, y.to(device))\n loss.backward()\n # The contribution to the weight gradient from every patch.\n # If we were to sum unfgrad.sum(dim=(0,5,6)) it would equal module.weight.grad\n # Now to reduce things, we need to score it per-patch somehow. We will dot-product\n # the average grad per-unit to see which patches push most in the consensus direction.\n # This gives a per-unit score at every patch.\n score = unfolder(inp) * r.grad\n\n # Hack: it is interesting to separate the cases where r.grad is positive\n # (the patch should look more like this to decrease the loss) from cases\n # where it is negative (the patch should look less like this). So\n # we will drop cases where the score is negative, and then negate the\n # score when r.grad is negative.\n signed_score = score.clamp(0) * (r.grad.sign())\n\n if return_details:\n return {k: v.detach().cpu() for k, v in dict(\n model_output=out,\n loss=loss,\n layer_output=r,\n layer_output_grad=r.grad,\n layer_input=inp,\n layer_input_by_Edw=unfolder(inp),\n weight_grad=wg,\n score=score,\n signed_score=signed_score).items()}\n\n return signed_score\n\n # Equivalent unrolled code below.\n # scores = []\n # for i in range(0, len(unf), 2):\n # ug = unf[i:i+2,None,:,:,:,:,:] * r.grad[i:i+2,:,None,None,None,:,:]\n # # Now to reduce things, we need to score it per-patch somehow. 
We will dot-product\n # # the average grad per-unit to see which patches push most in the consensus direction.\n # # This gives a per-unit score at every patch.\n # score = (ug * wg[None,:,:,:,:,None,None]\n # ).view(ug.shape[0], ug.shape[1], -1, ug.shape[5], ug.shape[6]).sum(2)\n # scores.append(score)\n # return torch.cat(scores)\n run.name = 'wgrad'\n\n def compute_samples(batch, labels, *args):\n score = run(batch, labels)\n flat_score = score.view(score.shape[0], score.shape[1], -1)\n top_score = flat_score.max(2)[0]\n bot_score = flat_score.min(2)[0]\n all_score = score.permute(0, 2, 3, 1).reshape(-1, score.shape[1])\n return top_score, bot_score, all_score\n topk, botk, rq = tally.tally_extremek_and_quantile(\n compute_samples, dataset, k=k, r=r,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)\n return topk, botk, rq, run\n\n### Experiment below:\n# tally p-v times every post-relu activation in a layer\n# and also sum up every activation\n# This is intended to measure how well a (simple linear) model\n# of the given feature can help solve the error p-v.\n\ndef sep_stats(model, dataset, layer=None, cachedir=None,\n batch_size=10, sample_size=None, num_workers=30):\n assert not model.training\n if layer is not None:\n module = nethook.get_module(model, layer)\n else:\n module = model\n device = next(model.parameters()).device\n pin_memory = (device.type != 'cpu')\n\n def run(x, labels, *args):\n with nethook.Trace(module) as ret, torch.no_grad():\n logits = model(x.to(device))\n labels = labels.to(device)\n r = ret.output\n p = torch.nn.functional.softmax(logits, dim=1)\n y = torch.zeros_like(p)\n y.scatter_(1, labels[:,None], 1)\n return r, p, y\n def compute_samples(batch, labels, *args):\n r, p, y = run(batch, labels)\n err = p-y\n sep_t = torch.cat((err, y, torch.ones(err.shape[0], 1, device=device)), dim=1)\n flat_r = r.view(r.shape[0], r.shape[1], -1).mean(2)[:,:,None]\n r_times_sep_t = flat_r * sep_t[:,None,:]\n # Number of stats to track is units * (classes + 1)\n sep_data = r_times_sep_t.view(len(batch), -1)\n return sep_data\n sepmv = tally.tally_mean(\n compute_samples, dataset,\n batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size,\n cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)\n return sepmv\n\n"
]
| [
[
"torch.zeros",
"torch.no_grad",
"torch.enable_grad",
"torch.ones",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.utils.data.dataloader.default_collate"
]
]
|
fujirock/Reinvent | [
"ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616"
]
| [
"unittest_reinvent/scoring_tests/scoring_components/test_qed_score.py"
]
| [
"import unittest\n\nfrom rdkit import Chem\n\nfrom scoring.component_parameters import ComponentParameters\nfrom scoring.score_components import QedScore\nfrom unittest_reinvent.scoring_tests.scoring_components import ScoringTest\nfrom utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum\nimport numpy.testing as npt\n\n\nclass Test_qed_score(ScoringTest, unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n sf_enum = ScoringFunctionComponentNameEnum()\n parameters = ComponentParameters(component_type=sf_enum.QED_SCORE,\n name=\"qed_score\",\n weight=1.,\n smiles=[],\n model_path=\"\",\n specific_parameters={})\n cls.component = QedScore(parameters)\n cls.smile = \"C1CCCCCCCCC1\"\n cls.mol = Chem.MolFromSmiles(cls.smile)\n\n def test_molecule_parsed_successfully(self):\n self.assertIsNotNone(self.mol)\n\n def test_invalid_molecule_returns_zero(self):\n score = self.component.calculate_score([None])\n npt.assert_almost_equal(score.total_score[0], 0.0, 4)\n\n def test_one_molecule(self):\n score = self.component.calculate_score([self.mol])\n self.assertEqual(1, len(score.total_score))\n npt.assert_almost_equal(score.total_score[0], 0.4784, 4)\n\n def test_one_molecule_2(self):\n npt.assert_almost_equal(self.score(self.smile), 0.4784, 3)\n\n def test_two_molecules(self):\n score = self.component.calculate_score([self.mol, self.mol])\n self.assertEqual(2, len(score.total_score))\n npt.assert_almost_equal(score.total_score[0], 0.4784, 4)\n npt.assert_almost_equal(score.total_score[1], 0.4784, 4)\n"
]
| [
[
"numpy.testing.assert_almost_equal"
]
]
|
drmcg/cellsegment | [
"69213a1c232d0ba81639080870593089b212d5d4"
]
| [
"cellsegment/json_utils.py"
]
| [
"#AUTOGENERATED! DO NOT EDIT! File to edit: dev/94_json_utils.ipynb (unless otherwise specified).\n\n__all__ = ['createjson', 'TEST_JSON_STRING', 'annotate_json', 'create_json_from_CSV', 'csv_to_json_dir',\n 'resize_json_file', 'resize_json_dir', 'prettyjson', 'getsubitems', 'basictype2str', 'indentitems']\n\n#Cell\nimport random\nfrom cellsegment.core import *\n\nimport json\nimport shutil\nimport pandas\nfrom fastai.vision import get_image_files, parallel, partial, PIL, Path, get_files\nimport numpy as np\n\nfrom PIL import Image, ImageDraw\nimport os\n\n#Cell\ndef createjson(fn='', offset=0):\n 'create a Labelme compatable json file'\n data = {\n 'version': '3.13.1',\n 'flags': {},\n 'shapes': [],\n 'lineColor': [0, 255, 0, 255],\n 'fillColor': [255, 255, 0, 20],\n 'imagePath': fn,\n 'offset': offset,\n 'imageData': None,\n 'imageHeight': 2464,\n 'imageWidth': 2464}\n return data\n\nTEST_JSON_STRING = \"\"\"\n{\n \"version\": \"3.13.1\",\n \"flags\": {},\n \"shapes\": [\n {\n \"label\": \"Liver Fluke\",\n \"line_color\": [0, 0, 255, 127],\n \"fill_color\": [0, 0, 255, 127],\n \"points\": [\n [622.4545454545455, 523.8484848484848], [642.4545454545455, 543.8484848484848]\n ],\n \"shape_type\": \"circle\",\n \"flags\": {}\n }\n ],\n \"lineColor\": [0, 255, 0, 255],\n \"fillColor\": [255, 255, 0, 20],\n \"imagePath\": \"235443 - 1.jpg\",\n \"imageData\": null,\n \"imageHeight\": 813,\n \"imageWidth\": 830,\n \"offset\": 0\n}\n\"\"\"\n\n#Cell\ndef annotate_json(data, shape_type='circle', points=None):\n # Annotate the json file with list of points\n assert shape_type == 'point' or shape_type == 'circle' or shape_type == 'rectangle'\n\n def add_anno(data, item):\n cx, cy = item['point']\n pnt_list = []\n if shape_type == 'point':\n pnt_list = [[cx, cy]]\n elif shape_type == 'rectangle': # bounding points\n r = 40\n pnt_list = [[cx - r, cy - r], [cx + r, cy + r]]\n elif shape_type == 'circle': # center & radius\n r = 20\n pnt_list = [[cx, cy], [cx + r, cy + r]]\n\n probability = str(item['probability']) if 'probability' in item else str(0)\n\n if item['label'] == 1:\n line_color = [255, 0, 0, 127]\n data['shapes'].append({\n \"label\": str(item['label']), \"line_color\": [255, 0, 0, 127], \"fill_color\": [255, 0, 0, 127],\n \"points\": pnt_list, \"shape_type\": shape_type, 'probability': probability\n })\n\n elif item['label'] == 2:\n line_color = [0, 255, 0, 127]\n data['shapes'].append({\n \"label\": str(item['label']), \"line_color\": [0, 255, 0, 127], \"fill_color\": [0, 255, 0, 127],\n \"points\": pnt_list, \"shape_type\": shape_type, 'probability': probability\n })\n\n else:\n line_color = [0, 0, 255, 127]\n data['shapes'].append({\n \"label\": str(item['label']), \"line_color\": [0, 0, 255, 127], \"fill_color\": [0, 0, 255, 127],\n \"points\": pnt_list, \"shape_type\": shape_type, 'probability': probability\n })\n\n def add_annotations(data, points):\n for item in points:\n add_anno(data, item)\n\n add_annotations(data, points)\n return data\n\n#Cell\ndef create_json_from_CSV(csv_fn, load_jpg_fn, offset=0, height=0, width=0):\n # Annotate the json file with predictions\n data = {\n 'version': '3.13.1',\n 'flags': {},\n 'shapes': [],\n 'lineColor': [0, 255, 0, 255],\n 'fillColor': [255, 255, 0, 20],\n 'imagePath': load_jpg_fn,\n 'offset': offset,\n 'imageData': None,\n 'imageHeight': height,\n 'imageWidth': width}\n\n # species_name = ['Nematodirus', 'Strongyle', 'other']\n\n egg_centers = []\n\n if (os.path.isfile(csv_fn)):\n with open(csv_fn, mode='r') as f:\n line_count = 0\n for line 
in f:\n row, col, species = [int(x) for x in line.split(',')]\n egg_centers.append({\"label\": species, \"particleType\": species, \"point\": [row, col]})\n line_count += 1\n\n data = annotate_json(data, points=egg_centers)\n data\n return data\n\n#Cell\ndef csv_to_json_dir(src_path, dest_path, number_files='all'):\n \"\"\" Convert an entire directory of Techion CSV files to JSON files. Store in the dest directory\n CSV format is x,y,species_number\n 1383,1571,2\n 1687,1822,1\n 2036,1327,1\n \"\"\"\n print('Converting an entire directory of Techion CSV files to JSON files')\n fnames_csv = sorted(get_files(src_path, extensions=['.csv']))\n fnames_jpg = sorted(get_files(src_path, extensions=['.jpg']))\n if isinstance(number_files, int):\n fnames_csv = fnames_csv[:number_files]\n fnames_jpg = fnames_jpg[:number_files]\n print('Number of csv & jpg files to convert', len(fnames_csv), len(fnames_jpg))\n\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n print(f\"src_path {src_path}\")\n print(f\"dest_path {dest_path}\")\n\n for i,fn in enumerate(fnames_jpg):\n parent = Path(fn).parents[0]\n name = Path(fn).name.split('.')[0]\n csv_fn = f\"{parent}/{name}.csv\"\n jpg_fn = f\"{parent}/{name}.jpg\"\n json_fn = f\"{parent}/{name}.json\"\n\n if Path(jpg_fn).is_file():\n img = PIL.Image.open(jpg_fn)\n data = create_json_from_CSV(Path(csv_fn).name, Path(jpg_fn).name, height=img.size[1], width=img.size[0])\n with open(json_fn, 'w') as outfile:\n # json.dump(data, outfile, indent=2)\n # print(f'Saving File {json_fn}')\n json.dump(data, outfile, ensure_ascii=False, indent=4)\n progress_bar(i+1,50)\n\n#Cell\ndef resize_json_file(fn, scale=1, offset=0):\n data = json.load(open(fn))\n\n for s, sh in enumerate(data['shapes']):\n for p, pnt in enumerate(sh[\"points\"]):\n data['shapes'][s]['points'][p][0] = round(data['shapes'][s]['points'][p][0] * scale)\n data['shapes'][s]['points'][p][1] = round(data['shapes'][s]['points'][p][1] * scale)\n data['shapes'][s]['points'][p][0] = data['shapes'][s]['points'][p][0] - offset\n data['imageData'] = None\n data['offset'] = offset\n data['scale'] = scale\n data['imageWidth'] = int (data['imageWidth'] * scale)\n data['imageHeight'] = int (data['imageHeight'] * scale)\n return data\n\n#Cell\ndef resize_json_dir(file_data, src_path, dest_path, number_files='all', height=800):\n \"\"\"\n Resize src_path directory of JSON files and store in dest_path directory\n There needs to be resized jpg files in the dest_path directory.\n An example is 226260 - 1|0.436047|221|.jpg where |0.436047|221| marks the factor and offset factors to apply\n :param src_path: Source path where json files are\n :param dest_path: Destination path to store resized json files\n :param number_files: Number of json files to process, leave empty for all files in directory\n :return: number of file processed\n \"\"\"\n df = pandas.read_csv(file_data, index_col=0)\n __number_files = df.shape[0]\n\n print(f'Number of JSON files: {__number_files}, Number to resize: {number_files}')\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n if isinstance(number_files, int):\n __number_files = number_files\n\n for i in range(__number_files):\n f_stem = df.loc[i,'Name'].split('.')[0]\n scale = float(height) / df.loc[i,'Height']\n offset = 0\n data = resize_json_file(f'{src_path}/{f_stem}.json', scale=float(scale), offset=int(offset))\n data['imagePath'] = df.loc[i,'Name']\n with open(f'{dest_path}/{f_stem}.json', 'w') as outfile:\n json.dump(data, outfile, ensure_ascii=False, indent=4)\n\n progress_bar(i + 1, 
50)\n print('')\n print(i+1, ' json files processed')\n return i+1\n\n#Cell\n# https://stackoverflow.com/a/56497521/104668\n\ndef prettyjson(obj, indent=2, maxlinelength=80):\n \"\"\"\n Renders JSON content with indentation and line splits/concatenations to fit maxlinelength.\n Only dicts, lists and basic types are supported\n - https://github.com/andy-gh/prettyjson\n - https://stackoverflow.com/a/56497521/104668\n \"\"\"\n\n items, _ = getsubitems(obj, itemkey=\"\", islast=True, maxlinelength=maxlinelength, level=0)\n return indentitems(items, indent, level=0)\n\n#Cell\n#export\nimport types\ndef getsubitems(obj, itemkey, islast, maxlinelength, level):\n items = []\n is_inline = True # at first, assume we can concatenate the inner tokens into one line\n\n isdict = isinstance(obj, dict)\n islist = isinstance(obj, list)\n istuple = isinstance(obj, tuple)\n isbasictype = not (isdict or islist or istuple)\n\n # build json content as a list of strings or child lists\n if isbasictype:\n # render basic type\n keyseparator = \"\" if itemkey == \"\" else \": \"\n itemseparator = \"\" if islast else \",\"\n items.append(itemkey + keyseparator + basictype2str(obj) + itemseparator)\n\n else:\n # render lists/dicts/tuples\n if isdict: opening, closing, keys = (\"{\", \"}\", iter(obj.keys()))\n elif islist: opening, closing, keys = (\"[\", \"]\", range(0, len(obj)))\n elif istuple: opening, closing, keys = (\"[\", \"]\", range(0, len(obj))) # tuples are converted into json arrays\n\n if itemkey != \"\": opening = itemkey + \": \" + opening\n if not islast: closing += \",\"\n\n count = 0\n itemkey = \"\"\n subitems = []\n\n # get the list of inner tokens\n for (i, k) in enumerate(keys):\n islast_ = i == len(obj)-1\n itemkey_ = \"\"\n if isdict: itemkey_ = basictype2str(k)\n inner, is_inner_inline = getsubitems(obj[k], itemkey_, islast_, maxlinelength, level+1)\n subitems.extend(inner) # inner can be a string or a list\n is_inline = is_inline and is_inner_inline # if a child couldn't be rendered inline, then we are not able either\n\n # fit inner tokens into one or multiple lines, each no longer than maxlinelength\n if is_inline:\n multiline = True\n\n # in Multi-line mode items of a list/dict/tuple can be rendered in multiple lines if they don't fit on one.\n # suitable for large lists holding data that's not manually editable.\n\n # in Single-line mode items are rendered inline if all fit in one line, otherwise each is rendered in a separate line.\n # suitable for smaller lists or dicts where manual editing of individual items is preferred.\n\n # this logic may need to be customized based on visualization requirements:\n if (isdict): multiline = False\n if (islist): multiline = True\n\n if (multiline):\n lines = []\n current_line = \"\"\n current_index = 0\n\n for (i, item) in enumerate(subitems):\n item_text = item\n if i < len(inner)-1: item_text = item + \",\"\n\n if len (current_line) > 0:\n try_inline = current_line + \" \" + item_text\n else:\n try_inline = item_text\n\n if (len(try_inline) > maxlinelength):\n # push the current line to the list if maxlinelength is reached\n if len(current_line) > 0: lines.append(current_line)\n current_line = item_text\n else:\n # keep fitting all to one line if still below maxlinelength\n current_line = try_inline\n\n # Push the remainder of the content if end of list is reached\n if (i == len (subitems)-1): lines.append(current_line)\n\n subitems = lines\n if len(subitems) > 1: is_inline = False\n else: # single-line mode\n totallength = len(subitems)-1 # 
spaces between items\n for item in subitems: totallength += len(item)\n if (totallength <= maxlinelength):\n str = \"\"\n for item in subitems: str += item + \" \" # insert space between items, comma is already there\n subitems = [ str.strip() ] # wrap concatenated content in a new list\n else:\n is_inline = False\n\n\n # attempt to render the outer brackets + inner tokens in one line\n if is_inline:\n item_text = \"\"\n if len(subitems) > 0: item_text = subitems[0]\n if len(opening) + len(item_text) + len(closing) <= maxlinelength:\n items.append(opening + item_text + closing)\n else:\n is_inline = False\n\n # if inner tokens are rendered in multiple lines already, then the outer brackets remain in separate lines\n if not is_inline:\n items.append(opening) # opening brackets\n items.append(subitems) # Append children to parent list as a nested list\n items.append(closing) # closing brackets\n\n return items, is_inline\n\n\ndef basictype2str(obj):\n if isinstance (obj, str):\n strobj = \"\\\"\" + str(obj) + \"\\\"\"\n elif isinstance(obj, bool):\n strobj = { True: \"true\", False: \"false\" }[obj]\n else:\n strobj = str(obj)\n return strobj\n\n\ndef indentitems(items, indent, level):\n \"\"\"Recursively traverses the list of json lines, adds indentation based on the current depth\"\"\"\n res = \"\"\n indentstr = \" \" * (indent * level)\n for (i, item) in enumerate(items):\n if isinstance(item, list):\n res += indentitems(item, indent, level+1)\n else:\n islast = (i==len(items)-1)\n # no new line character after the last rendered line\n if level==0 and islast:\n res += indentstr + item\n else:\n res += indentstr + item + \"\\n\"\n return res"
]
| [
[
"pandas.read_csv"
]
]
|
hargoniX/CNN-for-Sequential-Text | [
"7a44cfbc389f740ffd67468aa2f002d8dd1e04c8"
]
| [
"cnn_for_sequential_text_classification.py"
]
| [
"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.preprocessing import text, sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.layers.embeddings import Embedding\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Input, Dense, Dropout, Conv1D, MaxPooling1D, Flatten, Concatenate, BatchNormalization\nfrom keras.models import Model, load_model\n\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\n\nclass CNNSequenceClassifier:\n \"\"\"\n A CNN based classifier for text or other sequences.\n \"\"\"\n def __init__(self, X, Y, epochs: int, embedding_dimension: int = 11 , batch_size: int = 64):\n tokenizer = Tokenizer(char_level=False)\n tokenizer.fit_on_texts(X)\n word_index = tokenizer.word_index\n X = tokenizer.texts_to_sequences(X)\n # Set max length based on previously visualized sequence distributions\n self.max_sequence_length = sum(map(len, X)) // len(X)\n X = sequence.pad_sequences(X, maxlen=self.max_sequence_length)\n\n # Transform class labels to one-hot encodings\n lb = LabelBinarizer()\n Y = lb.fit_transform(Y)\n\n filter_sizes = (3,5,9,15,21)\n conv_blocks = []\n\n embedding_layer = Embedding(\n len(tokenizer.word_index)+1,\n embedding_dimension,\n input_length=self.max_sequence_length\n )\n\n callbacks = [EarlyStopping(monitor='val_accuracy', verbose=1, patience=4)]\n\n sequence_input = Input(shape=(self.max_sequence_length,), dtype='int32')\n embedded_sequences = embedding_layer(sequence_input)\n reshape = Dropout(0.1)(embedded_sequences)\n\n # Add convolutional layer for each filter size\n for size_val in filter_sizes:\n conv = Conv1D(\n filters=32,\n kernel_size=size_val,\n padding='valid',\n activation='relu',\n strides=1)(reshape)\n conv = MaxPooling1D(pool_size=2)(conv)\n conv = Flatten()(conv)\n conv_blocks.append(conv)\n\n merged = Concatenate()(conv_blocks)\n dropout = Dropout(0.25)(merged)\n normalize = BatchNormalization()(dropout)\n output = Dense(256, activation='relu')(normalize)\n normalized_output = BatchNormalization()(output)\n predictions = Dense(4, activation='softmax')(normalized_output)\n self.model = Model(sequence_input, predictions)\n self.model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n\n self.model.fit(\n X, Y,\n batch_size=batch_size,\n verbose=1,\n validation_split=0.15,\n callbacks=callbacks,\n epochs=epochs\n )\n\n def predict(self, X):\n tokenizer = Tokenizer(char_level=False)\n tokenizer.fit_on_texts(X)\n word_index = tokenizer.word_index\n X = tokenizer.texts_to_sequences(X)\n # Set max length based on previously visualized sequence distributions\n X = sequence.pad_sequences(X, maxlen=self.max_sequence_length)\n\n return self.model.predict(X)\n\n def load(self, filename: str):\n self.model = load_model(filename)\n\n def save(self, filename: str):\n self.model.save_weights(filename)\n"
]
| [
[
"sklearn.preprocessing.LabelBinarizer"
]
]
|
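A hypothetical usage sketch for the `CNNSequenceClassifier` above; the module name mirrors this row's file_path, and the toy texts, labels, and epoch count are invented. Padded sequences must be at least as long as the largest convolution filter (21 tokens), and the `Dense(4)` head expects four classes:

```python
# Hypothetical usage; module name taken from this row's file_path.
from cnn_for_sequential_text_classification import CNNSequenceClassifier

# Toy data: four classes, and documents of 40 tokens so the mean-length
# padding stays longer than the largest filter (kernel_size=21).
texts = ["label %d marker word " % (i % 4) * 10 for i in range(400)]
labels = [i % 4 for i in range(400)]

clf = CNNSequenceClassifier(texts, labels, epochs=2)  # trains in __init__
probs = clf.predict([texts[0]])                       # shape (1, 4)
print(probs.argmax(axis=1))                           # most likely class index
```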
phantoms158/Mask-rcnn | [
"60db1355747d2c47b3ff70aa6140d16a1504541e"
]
| [
"mrcnn/model.py"
]
| [
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport skimage.transform\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f} {}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\",\n array.dtype))\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, height, width, channels].\n The width and height are those specified in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propagation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n pooled = tf.expand_dims(pooled, 0)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. 
Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n Class-specific bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinements.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [N, NUM_CLASSES] classifier logits (before softmax)\n probs: [N, NUM_CLASSES] classifier probabilities\n bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. 
Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, roi_count, height, width, num_classes]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unused bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 
1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n # TODO: use smooth_l1_loss() rather than reimplementing here\n # to reduce code duplication\n diff = K.abs(target_bbox - rpn_bbox)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Compute the loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. 
Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. 
The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. 
It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(skimage.transform.resize(\n class_mask, (gt_h, gt_w), order=1, mode=\"constant\")).astype(bool)\n # Place the resized mini mask in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode=\"constant\")\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is 
a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from where we left off. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done already, so this actually determines\n the epochs to train in total rather than in this particular\n call.\n layers: Allows selecting which layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n\t custom_callbacks: Optional. Add custom callbacks to be called\n\t with the keras fit_generator method. Must be list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=1,\n use_multiprocessing=False,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name=None):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n"
]
| [
[
"tensorflow.exp",
"numpy.random.choice",
"tensorflow.image.non_max_suppression",
"numpy.copy",
"tensorflow.unique",
"tensorflow.reshape",
"numpy.where",
"tensorflow.sqrt",
"numpy.sort",
"tensorflow.stack",
"tensorflow.control_dependencies",
"numpy.broadcast_to",
"tensorflow.divide",
"tensorflow.cast",
"tensorflow.identity",
"numpy.concatenate",
"numpy.divide",
"tensorflow.shape",
"numpy.empty",
"tensorflow.concat",
"numpy.log",
"tensorflow.argmax",
"tensorflow.image.crop_and_resize",
"tensorflow.Variable",
"tensorflow.transpose",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.squeeze",
"numpy.argmax",
"numpy.random.randint",
"numpy.arange",
"tensorflow.split",
"tensorflow.pad",
"numpy.expand_dims",
"tensorflow.abs",
"numpy.array",
"tensorflow.range",
"tensorflow.minimum",
"numpy.reshape",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.gather_nd",
"tensorflow.round",
"tensorflow.where",
"numpy.delete",
"numpy.random.shuffle",
"tensorflow.map_fn",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.log",
"numpy.stack",
"numpy.amax",
"tensorflow.reduce_sum",
"tensorflow.nn.top_k",
"numpy.hstack",
"tensorflow.boolean_mask",
"tensorflow.logical_and",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.fliplr",
"tensorflow.random_shuffle",
"tensorflow.size",
"tensorflow.multiply",
"numpy.sum",
"numpy.ones",
"tensorflow.equal",
"numpy.any",
"tensorflow.reduce_max",
"numpy.abs",
"tensorflow.gather",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
]
|
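The `build_rpn_targets` function in the Mask R-CNN row above labels every anchor 1 (positive), -1 (negative), or 0 (neutral) using two IoU thresholds, and forces at least one positive anchor per ground-truth box by overwriting negatives in a fixed order. Below is a minimal NumPy sketch of just that matching rule; `compute_overlaps` here is a simplified stand-in for the repo's `utils.compute_overlaps`, and the toy boxes at the end are invented for illustration.

```python
import numpy as np

def compute_overlaps(boxes1, boxes2):
    # IoU between each box in boxes1 [N, 4] and boxes2 [M, 4], (y1, x1, y2, x2).
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    y1 = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    x1 = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    y2 = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    x2 = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    intersection = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    return intersection / (area1[:, None] + area2[None, :] - intersection)

def match_anchors(anchors, gt_boxes, pos_iou=0.7, neg_iou=0.3):
    # Returns 1 = positive, -1 = negative, 0 = neutral, mirroring build_rpn_targets.
    overlaps = compute_overlaps(anchors, gt_boxes)
    match = np.zeros(anchors.shape[0], dtype=np.int32)
    anchor_iou_max = overlaps.max(axis=1)
    # 1. Negatives first, so the later rules can overwrite them.
    match[anchor_iou_max < neg_iou] = -1
    # 2. The best anchor for each GT box is positive regardless of its IoU.
    match[overlaps.argmax(axis=0)] = 1
    # 3. Any anchor with high enough IoU is positive.
    match[anchor_iou_max >= pos_iou] = 1
    return match

anchors = np.array([[0, 0, 10, 10], [5, 5, 15, 15], [40, 40, 60, 60]], dtype=np.float32)
gt_boxes = np.array([[0, 0, 10, 10]], dtype=np.float32)
print(match_anchors(anchors, gt_boxes))  # -> [ 1 -1 -1]
```

The step ordering is the whole trick: because step 2 runs after step 1, a ground-truth box whose best anchor has IoU below 0.3 still receives one positive anchor, which is exactly the "don't keep any GT box unmatched" case called out in the original comments.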
anjandeepsahni/automatic_speech_recognition | [
"181d6435e1846bb137218e260e2f65584e045bd8"
]
| [
"Code/beamsearch.py"
]
| [
"import numpy as np\n\n\n# Node class for beam search.\nclass BeamNode(object):\n def __init__(self, parent, state, value, cost, extras):\n super(BeamNode, self).__init__()\n # value/predicted word for current node.\n self.value = value\n # parent Node, None for root\n self.parent = parent\n # current node's lstm hidden state\n self.state = state\n # cumulative cost of entire chain upto current node.\n self.cum_cost = parent.cum_cost + cost if parent else cost\n # length of entire chain\n self.length = 1 if parent is None else parent.length + 1\n # any extra variables to store for the node\n self.extras = extras\n # to hold the node's entire sequence.\n self._sequence = None\n\n def to_sequence(self):\n # Return sequence of nodes from root to current node.\n if not self._sequence:\n self._sequence = []\n current_node = self\n while current_node:\n self._sequence.insert(0, current_node)\n current_node = current_node.parent\n return self._sequence\n\n def to_sequence_of_values(self):\n return [s.value for s in self.to_sequence()]\n\n def to_sequence_of_extras(self):\n return [s.extras for s in self.to_sequence()]\n\n\ndef beam_search(initial_state_function,\n generate_function,\n X,\n end_id,\n batch_size=1,\n beam_width=4,\n num_hypotheses=1,\n max_length=50,\n vocab_size=33):\n # initial_state_function: A function that takes X as input and returns\n # state (2-dimensonal numpy array with 1 row\n # representing decoder recurrent layer state).\n # generate_function: A function that takes Y_tm1 (1-dimensional numpy array\n # of token indices in decoder vocabulary generated at\n # previous step) and state_tm1 (2-dimensonal numpy array\n # of previous step decoder recurrent layer states) as\n # input and returns state_t (2-dimensonal numpy array of\n # current step decoder recurrent layer states),\n # p_t (2-dimensonal numpy array of decoder softmax\n # outputs) and optional extras (e.g. attention weights\n # at current step).\n # X: List of input token indices in encoder vocabulary.\n # end_id: Index of <end sequence> token in decoder vocabulary.\n # batch_size: Batch size. TBD !\n # beam_width: Beam size. Default 4. (NOTE: Fails for beam > vocab)\n # num_hypotheses: Number of hypotheses to generate. Default 1.\n # max_length: Length limit for generated sequence. 
Default 50.\n initial_state, initial_value = initial_state_function(X, batch_size)\n next_fringe = [BeamNode(parent=None,\n state=initial_state,\n value=initial_value,\n cost=0.0,\n extras=None)\n ]\n hypotheses = []\n\n for step in range(max_length):\n fringe = []\n for n in next_fringe:\n if (step != 0 and n.value == end_id) or step == max_length - 1:\n hypotheses.append(n)\n else:\n fringe.append(n)\n\n if not fringe or len(hypotheses) >= num_hypotheses:\n # if not fringe:\n break\n\n Y_tm1 = [n.value for n in fringe]\n state_tm1 = [n.state for n in fringe]\n state_t, p_t, extras_t = generate_function(Y_tm1, state_tm1)\n Y_t = np.argsort(\n p_t, axis=1\n )[:, -beam_width:] # no point in taking more than fits in the beam\n next_fringe = []\n for Y_t_n, p_t_n, extras_t_n, state_t_n, n in zip(\n Y_t, p_t, extras_t, state_t, fringe):\n Y_nll_t_n = -np.log(p_t_n[Y_t_n])\n\n for y_t_n, y_nll_t_n in zip(Y_t_n, Y_nll_t_n):\n n_new = BeamNode(parent=n,\n state=state_t_n,\n value=y_t_n,\n cost=y_nll_t_n,\n extras=extras_t_n)\n next_fringe.append(n_new)\n\n next_fringe = sorted(\n next_fringe, key=lambda n: n.cum_cost\n )[:beam_width] # may move this into loop to save memory\n\n hypotheses.sort(key=lambda n: n.cum_cost)\n return hypotheses[:num_hypotheses]\n"
]
| [
[
"numpy.log",
"numpy.argsort"
]
]
|
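The beam_search() above is driven entirely by the two callbacks documented in its comments. A minimal usage sketch, assuming BeamNode and beam_search from the snippet above are in scope; the 4-token transition matrix, END_ID, and both callback bodies are invented here purely for illustration:

import numpy as np

END_ID = 3  # token 3 acts as <end sequence> in this toy vocabulary

# Fixed "decoder": row i gives the next-token distribution after token i.
P = np.array([[0.10, 0.60, 0.20, 0.10],
              [0.10, 0.10, 0.60, 0.20],
              [0.10, 0.10, 0.10, 0.70],
              [0.05, 0.05, 0.05, 0.85]])

def initial_state_function(X, batch_size):
    # Returns (state, first token); the state is unused by this toy model.
    return np.zeros((1, 1)), 0

def generate_function(Y_tm1, state_tm1):
    p_t = P[np.asarray(Y_tm1)]           # (len(fringe), vocab) probabilities
    state_t = np.zeros((len(Y_tm1), 1))  # dummy recurrent states
    extras_t = [None] * len(Y_tm1)       # no attention weights in the toy
    return state_t, p_t, extras_t

for h in beam_search(initial_state_function, generate_function,
                     X=[0], end_id=END_ID, beam_width=2, num_hypotheses=2):
    print(h.to_sequence_of_values(), round(h.cum_cost, 3))

Each printed hypothesis is the token chain from the root to an <end> node, together with its cumulative negative log-likelihood.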
lmondada/Python-Raytracer | [
"63fc93ced2f4451775c645974e9db979c663abf3"
]
| [
"sightpy/scene.py"
]
| [
"from PIL import Image\nimport numpy as np\nimport time\nimport copy\nfrom multiprocessing import Pool, cpu_count\nfrom .utils import colour_functions as cf\nfrom .camera import Camera\nfrom .utils.constants import *\nfrom .utils.vector3 import vec3, rgb\nfrom .ray import Ray, get_raycolor, get_distances\nfrom . import lights\nfrom .backgrounds.skybox import SkyBox\nfrom .backgrounds.panorama import Panorama\n\n\ndef get_raycolor_tuple(x):\n return get_raycolor(*x)\n\n\ndef batch_rays(rays, batch_size):\n batches = []\n n_rays = len(rays)\n for ray_ind in range(0, n_rays, batch_size):\n batches.append(Ray.concatenate(rays[ray_ind : ray_ind + batch_size]))\n return batches\n\n\nclass Scene:\n def __init__(self, ambient_color=rgb(0.01, 0.01, 0.01), n=vec3(1.0, 1.0, 1.0)):\n # n = index of refraction (by default index of refraction of air n = 1.)\n\n self.scene_primitives = []\n self.collider_list = []\n self.shadowed_collider_list = []\n self.Light_list = []\n self.importance_sampled_list = []\n self.ambient_color = ambient_color\n self.n = n\n self.importance_sampled_list = []\n\n def add_Camera(self, look_from, look_at, **kwargs):\n self.camera = Camera(look_from, look_at, **kwargs)\n\n def add_PointLight(self, pos, color):\n self.Light_list += [lights.PointLight(pos, color)]\n\n def add_DirectionalLight(self, Ldir, color):\n self.Light_list += [lights.DirectionalLight(Ldir.normalize(), color)]\n\n def add(self, primitive, importance_sampled=False):\n self.scene_primitives += [primitive]\n self.collider_list += primitive.collider_list\n\n if importance_sampled == True:\n self.importance_sampled_list += [primitive]\n\n if primitive.shadow == True:\n self.shadowed_collider_list += primitive.collider_list\n\n def add_Background(self, img, light_intensity=0.0, blur=0.0, spherical=False):\n\n primitive = None\n if spherical == False:\n primitive = SkyBox(img, light_intensity=light_intensity, blur=blur)\n else:\n primitive = Panorama(img, light_intensity=light_intensity, blur=blur)\n\n self.scene_primitives += [primitive]\n self.collider_list += primitive.collider_list\n\n def render(self, samples_per_pixel, progress_bar=False, batch_size=None):\n\n print(\"Rendering...\")\n\n t0 = time.time()\n color_RGBlinear = rgb(0.0, 0.0, 0.0)\n\n all_rays = [self.camera.get_ray(self.n) for i in range(samples_per_pixel)]\n\n n_proc = cpu_count()\n rays_per_batch = len(self.camera.get_ray(self.n))\n batch_size = batch_size or np.ceil(samples_per_pixel / n_proc).astype(int)\n\n all_rays_batched = batch_rays(all_rays, batch_size)\n args = [(batch, copy.deepcopy(self)) for batch in all_rays_batched]\n # all_rays = [\n # (self.camera.get_ray(self.n), copy.deepcopy(self))\n # for i in range(samples_per_pixel)\n # ]\n if progress_bar == True:\n try:\n import progressbar\n except ModuleNotFoundError:\n print(\"progressbar module is required. 
\\nRun: pip install progressbar\")\n\n bar = progressbar.ProgressBar(maxval=samples_per_pixel)\n\n with Pool(processes=n_proc) as pool:\n bar.start()\n for i, color in enumerate(\n pool.imap_unordered(get_raycolor_tuple, args)\n ):\n for batch in range(batch_size):\n beg, end = batch * rays_per_batch, (batch + 1) * rays_per_batch\n color_RGBlinear += color[beg:end]\n bar.update(i)\n bar.finish()\n\n else:\n with Pool(processes=n_proc) as pool:\n for i, color in enumerate(\n pool.imap_unordered(get_raycolor_tuple, args)\n ):\n for batch in range(batch_size):\n beg, end = batch * rays_per_batch, (batch + 1) * rays_per_batch\n color_RGBlinear += color[beg:end]\n\n # average samples per pixel (antialiasing)\n color_RGBlinear = color_RGBlinear / samples_per_pixel\n # gamma correction\n color = cf.sRGB_linear_to_sRGB(color_RGBlinear.to_array())\n\n print(\"Render Took\", time.time() - t0)\n\n img_RGB = []\n for c in color:\n # average ray colors that fall in the same pixel. (antialiasing)\n img_RGB += [\n Image.fromarray(\n (\n 255\n * np.clip(c, 0, 1).reshape(\n (self.camera.screen_height, self.camera.screen_width)\n )\n ).astype(np.uint8),\n \"L\",\n )\n ]\n\n return Image.merge(\"RGB\", img_RGB)\n\n def get_distances(\n self,\n ): # Used for debugging ray-primitive collisions. Return a grey map of objects distances.\n\n print(\"Rendering...\")\n t0 = time.time()\n color_RGBlinear = get_distances(self.camera.get_ray(self.n), scene=self)\n # gamma correction\n color = color_RGBlinear.to_array()\n\n print(\"Render Took\", time.time() - t0)\n\n img_RGB = [\n Image.fromarray(\n (\n 255\n * np.clip(c, 0, 1).reshape(\n (self.camera.screen_height, self.camera.screen_width)\n )\n ).astype(np.uint8),\n \"L\",\n )\n for c in color\n ]\n return Image.merge(\"RGB\", img_RGB)\n"
]
| [
[
"numpy.ceil",
"numpy.clip"
]
]
|
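Scene.render() above follows a common parallelization pattern: batch the work, fan it out with Pool.imap_unordered, and accumulate partial results as they arrive. A self-contained sketch of just that pattern, with plain numbers standing in for Ray batches and colors; nothing here is sightpy's API:

import numpy as np
from multiprocessing import Pool, cpu_count

def trace_batch(batch):
    # Stand-in for get_raycolor(*x): returns one "color" per ray in the batch.
    return np.asarray(batch, dtype=float) * 0.5

if __name__ == "__main__":
    samples = list(range(32))
    n_proc = cpu_count()
    batch_size = int(np.ceil(len(samples) / n_proc))
    batches = [samples[i:i + batch_size]
               for i in range(0, len(samples), batch_size)]
    total = 0.0
    with Pool(processes=n_proc) as pool:
        # Results arrive in completion order, which is fine for a sum.
        for colors in pool.imap_unordered(trace_batch, batches):
            total += colors.sum()
    print(total)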
thoolihan/PythonMath | [
"25e1ef092d4bb013a91a4856a255f2a38a9cb46c"
]
| [
"tests/calc_tests.py"
]
| [
"import unittest\nimport numpy as np\nimport calculus as calc\n\nclass CalcTests(unittest.TestCase):\n def test_euler(self):\n x = np.pi / 4.\n y_est, ys, ds = calc.euler(dy_dx = lambda x,y: np.cos(x),\n y0 = 0,\n h = 1e-4,\n start = 0,\n end = x)\n self.assertAlmostEqual(y_est, np.sin(x), places = 3,\n msg = \"Estimate should be close to actual sin of value\")\n\n def test_newton(self):\n fx = lambda x: -4*x**3 - 2*x**2 - 2*x - 3\n dx = lambda x: -12*x**2 - 4*x - 2\n x_n = calc.newton(iter = 10, x0 = -2, fx = fx, dx = dx)\n self.assertAlmostEqual(x_n[2], -1.058263, places = 4,\n msg = \"Estimate of root does not match answer\")\n\n def test_mvt(self):\n a = 1\n b = 4\n fx = lambda x: x**3\n f_c = calc.mvt(a = a, b = b, fx = fx)\n self.assertEqual(f_c, 21, msg = \"MVT of c should be 21\")\n"
]
| [
[
"numpy.sin",
"numpy.cos"
]
]
|
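The test file above pins down the expected interface of calculus.euler: it is called as euler(dy_dx, y0, h, start, end) and returns an (estimate, ys, ds) triple. A minimal forward-Euler sketch consistent with that interface, inferred from the test rather than taken from the calculus module itself:

import numpy as np

def euler(dy_dx, y0, h, start, end):
    xs = np.arange(start, end, h)
    ys, ds = [y0], []
    for x in xs:
        d = dy_dx(x, ys[-1])   # slope at the current point
        ds.append(d)
        ys.append(ys[-1] + h * d)  # step forward by h along that slope
    return ys[-1], ys, ds

# Mirrors test_euler: integrating cos(x) from 0 approximates sin(x).
est, _, _ = euler(lambda x, y: np.cos(x), y0=0, h=1e-4, start=0, end=np.pi / 4)
print(est, np.sin(np.pi / 4))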
cjer/gtfstk | [
"7e1573a2fae786f4683cefc3c9d1f0310baa5367"
]
| [
"gtfstk/constants.py"
]
| [
"\"\"\"\nConstants useful across modules.\n\"\"\"\n\nimport pandas as pd\n\n# Record some data from the GTFS reference at\n# https://developers.google.com/transit/gtfs/reference/\ncolumns = ['table', 'table_required', 'column', 'column_required', 'dtype']\nrows = [\n ['agency', True, 'agency_id', False, 'str'],\n ['agency', True, 'agency_name', True, 'str'],\n ['agency', True, 'agency_url', True, 'str'],\n ['agency', True, 'agency_timezone', True, 'str'],\n ['agency', True, 'agency_lang', False, 'str'],\n ['agency', True, 'agency_phone', False, 'str'],\n ['agency', True, 'agency_fare_url', False, 'str'],\n ['agency', True, 'agency_email', False, 'str'],\n ['calendar', False, 'service_id', True, 'str'],\n ['calendar', False, 'monday', True, 'int'],\n ['calendar', False, 'tuesday', True, 'int'],\n ['calendar', False, 'wednesday', True, 'int'],\n ['calendar', False, 'thursday', True, 'int'],\n ['calendar', False, 'friday', True, 'int'],\n ['calendar', False, 'saturday', True, 'int'],\n ['calendar', False, 'sunday', True, 'int'],\n ['calendar', False, 'start_date', True, 'str'],\n ['calendar', False, 'end_date', True, 'str'],\n ['calendar_dates', False, 'service_id', True, 'str'],\n ['calendar_dates', False, 'date', True, 'str'],\n ['calendar_dates', False, 'exception_type', True, 'int'],\n ['fare_attributes', False, 'fare_id', True, 'str'],\n ['fare_attributes', False, 'price', True, 'float'],\n ['fare_attributes', False, 'currency_type', True, 'str'],\n ['fare_attributes', False, 'payment_method', True, 'int'],\n ['fare_attributes', False, 'transfers', True, 'int'],\n ['fare_attributes', False, 'transfer_duration', False, 'int'],\n ['fare_rules', False, 'fare_id', True, 'str'],\n ['fare_rules', False, 'route_id', False, 'str'],\n ['fare_rules', False, 'origin_id', False, 'str'],\n ['fare_rules', False, 'destination_id', False, 'str'],\n ['fare_rules', False, 'contains_id', False, 'str'],\n ['feed_info', False, 'feed_publisher_name', True, 'str'],\n ['feed_info', False, 'feed_publisher_url', True, 'str'],\n ['feed_info', False, 'feed_lang', True, 'str'],\n ['feed_info', False, 'feed_start_date', False, 'str'],\n ['feed_info', False, 'feed_end_date', False, 'str'],\n ['feed_info', False, 'feed_version', False, 'str'],\n ['frequencies', False, 'trip_id', True, 'str'],\n ['frequencies', False, 'start_time', True, 'str'],\n ['frequencies', False, 'end_time', True, 'str'],\n ['frequencies', False, 'headway_secs', True, 'int'],\n ['frequencies', False, 'exact_times', False, 'int'],\n ['routes', True, 'route_id', True, 'str'],\n ['routes', True, 'agency_id', False, 'str'],\n ['routes', True, 'route_short_name', True, 'str'],\n ['routes', True, 'route_long_name', True, 'str'],\n ['routes', True, 'route_desc', False, 'str'],\n ['routes', True, 'route_type', True, 'int'],\n ['routes', True, 'route_url', False, 'str'],\n ['routes', True, 'route_color', False, 'str'],\n ['routes', True, 'route_text_color', False, 'str'],\n ['shapes', False, 'shape_id', True, 'str'],\n ['shapes', False, 'shape_pt_lat', True, 'float'],\n ['shapes', False, 'shape_pt_lon', True, 'float'],\n ['shapes', False, 'shape_pt_sequence', True, 'int'],\n ['shapes', False, 'shape_dist_traveled', False, 'float'],\n ['stops', True, 'stop_id', True, 'str'],\n ['stops', True, 'stop_code', False, 'str'],\n ['stops', True, 'stop_name', True, 'str'],\n ['stops', True, 'stop_desc', False, 'str'],\n ['stops', True, 'stop_lat', True, 'float'],\n ['stops', True, 'stop_lon', True, 'float'],\n ['stops', True, 'zone_id', False, 'str'],\n ['stops', 
True, 'stop_url', False, 'str'],\n ['stops', True, 'location_type', False, 'int'],\n ['stops', True, 'parent_station', False, 'str'],\n ['stops', True, 'stop_timezone', False, 'str'],\n ['stops', True, 'wheelchair_boarding', False, 'int'],\n ['stop_times', True, 'trip_id', True, 'str'],\n ['stop_times', True, 'arrival_time', True, 'str'],\n ['stop_times', True, 'departure_time', True, 'str'],\n ['stop_times', True, 'stop_id', True, 'str'],\n ['stop_times', True, 'stop_sequence', True, 'int'],\n ['stop_times', True, 'stop_headsign', False, 'str'],\n ['stop_times', True, 'pickup_type', False, 'int'],\n ['stop_times', True, 'drop_off_type', False, 'int'],\n ['stop_times', True, 'shape_dist_traveled', False, 'float'],\n ['stop_times', True, 'timepoint', False, 'int'],\n ['transfers', False, 'from_stop_id', True, 'str'],\n ['transfers', False, 'to_stop_id', True, 'str'],\n ['transfers', False, 'transfer_type', True, 'int'],\n ['transfers', False, 'min_transfer_time', False, 'int'],\n ['trips', True, 'route_id', True, 'str'],\n ['trips', True, 'service_id', True, 'str'],\n ['trips', True, 'trip_id', True, 'str'],\n ['trips', True, 'trip_headsign', False, 'str'],\n ['trips', True, 'trip_short_name', False, 'str'],\n ['trips', True, 'direction_id', False, 'int'],\n ['trips', True, 'block_id', False, 'str'],\n ['trips', True, 'shape_id', False, 'str'],\n ['trips', True, 'wheelchair_accessible', False, 'int'],\n ['trips', True, 'bikes_allowed', False, 'int'],\n]\nGTFS_REF = pd.DataFrame(rows, columns=columns)\n\n#: Columns that must be formatted as integers when outputting GTFS\nINT_COLS = GTFS_REF.loc[GTFS_REF['dtype'] == 'int', 'column'].values.tolist()\n\n#: Columns that must be read as strings by Pandas\nSTR_COLS = GTFS_REF.loc[GTFS_REF['dtype'] == 'str', 'column'].values.tolist()\n\nDTYPE = {col: str for col in STR_COLS}\n\n#: Valid distance units\nDIST_UNITS = ['ft', 'mi', 'm', 'km']\n\n#: Primary feed attributes\nFEED_ATTRS_1 = [\n 'agency',\n 'calendar',\n 'calendar_dates',\n 'fare_attributes',\n 'fare_rules',\n 'feed_info',\n 'frequencies',\n 'routes',\n 'shapes',\n 'stops',\n 'stop_times',\n 'trips',\n 'transfers',\n 'dist_units',\n]\n\n#: Secondary feed attributes; derived from primary ones\nFEED_ATTRS_2 = [\n '_trips_i',\n '_calendar_i',\n '_calendar_dates_g',\n]\n\n#:\nFEED_ATTRS = FEED_ATTRS_1 + FEED_ATTRS_2\n\n#: WGS84 coordinate reference system for Geopandas\nWGS84 = {'init': 'epsg:4326'}\n\n#: Colorbrewer 8-class Set2 colors\nCOLORS_SET2 = ['#66c2a5', '#fc8d62', '#8da0cb', '#e78ac3',\n '#a6d854', '#ffd92f', '#e5c494', '#b3b3b3']\n"
]
| [
[
"pandas.DataFrame"
]
]
|
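Because GTFS_REF above is an ordinary DataFrame, the derived constants (INT_COLS, STR_COLS, DTYPE) are just column queries, and callers can make their own. A short sketch, assuming the module is importable as gtfstk.constants:

from gtfstk.constants import GTFS_REF, DTYPE

# Required columns of the (itself required) stops table.
required_stop_cols = GTFS_REF.loc[
    (GTFS_REF['table'] == 'stops') & GTFS_REF['column_required'],
    'column'].tolist()
print(required_stop_cols)  # ['stop_id', 'stop_name', 'stop_lat', 'stop_lon']

# DTYPE is meant for readers, e.g. pandas.read_csv('stops.txt', dtype=DTYPE),
# so ID-like columns stay strings instead of being coerced to numbers.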
jdsalmonson/mushroom-rl | [
"27557269be5415879e035ff862815d2d5d9fb795"
]
| [
"tests/algorithms/test_ddpg.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom datetime import datetime\nfrom helper.utils import TestUtils as tu\n\nfrom mushroom_rl.core import Agent\nfrom mushroom_rl.algorithms.actor_critic import DDPG, TD3\nfrom mushroom_rl.core import Core\nfrom mushroom_rl.environments.gym_env import Gym\nfrom mushroom_rl.policy import OrnsteinUhlenbeckPolicy\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, **kwargs):\n super().__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._h = nn.Linear(n_input, n_output)\n\n nn.init.xavier_uniform_(self._h.weight,\n gain=nn.init.calculate_gain('relu'))\n\n def forward(self, state, action):\n state_action = torch.cat((state.float(), action.float()), dim=1)\n q = F.relu(self._h(state_action))\n\n return torch.squeeze(q)\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, **kwargs):\n super(ActorNetwork, self).__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._h = nn.Linear(n_input, n_output)\n\n nn.init.xavier_uniform_(self._h.weight,\n gain=nn.init.calculate_gain('relu'))\n\n def forward(self, state):\n return F.relu(self._h(torch.squeeze(state, 1).float()))\n\n\ndef learn(alg):\n mdp = Gym('Pendulum-v1', 200, .99)\n mdp.seed(1)\n np.random.seed(1)\n torch.manual_seed(1)\n torch.cuda.manual_seed(1)\n\n # Policy\n policy_class = OrnsteinUhlenbeckPolicy\n policy_params = dict(sigma=np.ones(1) * .2, theta=.15, dt=1e-2)\n\n # Settings\n initial_replay_size = 500\n max_replay_size = 5000\n batch_size = 200\n n_features = 80\n tau = .001\n\n # Approximator\n actor_input_shape = mdp.info.observation_space.shape\n actor_params = dict(network=ActorNetwork,\n n_features=n_features,\n input_shape=actor_input_shape,\n output_shape=mdp.info.action_space.shape,\n use_cuda=False)\n\n actor_optimizer = {'class': optim.Adam,\n 'params': {'lr': .001}}\n\n critic_input_shape = (actor_input_shape[0] + mdp.info.action_space.shape[0],)\n critic_params = dict(network=CriticNetwork,\n optimizer={'class': optim.Adam,\n 'params': {'lr': .001}},\n loss=F.mse_loss,\n n_features=n_features,\n input_shape=critic_input_shape,\n output_shape=(1,),\n use_cuda=False)\n\n # Agent\n agent = alg(mdp.info, policy_class, policy_params,\n actor_params, actor_optimizer, critic_params, batch_size,\n initial_replay_size, max_replay_size, tau)\n\n # Algorithm\n core = Core(agent, mdp)\n\n core.learn(n_episodes=10, n_episodes_per_fit=5)\n\n return agent\n\n\ndef test_ddpg():\n policy = learn(DDPG).policy\n w = policy.get_weights()\n w_test = np.array([-0.28865, -0.7487735, -0.5533644, -0.34702766])\n\n assert np.allclose(w, w_test)\n\n\ndef test_ddpg_save(tmpdir):\n agent_path = tmpdir / 'agent_{}'.format(datetime.now().strftime(\"%H%M%S%f\"))\n\n agent_save = learn(DDPG)\n\n agent_save.save(agent_path, full_save=True)\n agent_load = Agent.load(agent_path)\n\n for att, method in vars(agent_save).items():\n save_attr = getattr(agent_save, att)\n load_attr = getattr(agent_load, att)\n\n tu.assert_eq(save_attr, load_attr)\n\n\ndef test_td3():\n policy = learn(TD3).policy\n w = policy.get_weights()\n w_test = np.array([1.7005192, -0.73382795, 1.2999079, -0.26730126])\n\n assert np.allclose(w, w_test)\n\n\ndef test_td3_save(tmpdir):\n agent_path = tmpdir / 'agent_{}'.format(datetime.now().strftime(\"%H%M%S%f\"))\n\n agent_save = learn(TD3)\n\n agent_save.save(agent_path, full_save=True)\n agent_load = 
Agent.load(agent_path)\n\n for att, method in vars(agent_save).items():\n save_attr = getattr(agent_save, att)\n load_attr = getattr(agent_load, att)\n\n tu.assert_eq(save_attr, load_attr)\n"
]
| [
[
"torch.nn.Linear",
"numpy.array",
"torch.cuda.manual_seed",
"numpy.random.seed",
"numpy.ones",
"torch.manual_seed",
"numpy.allclose",
"torch.squeeze",
"torch.nn.init.calculate_gain"
]
]
|
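The tau=.001 passed to the agents above is the soft target-network update coefficient used by DDPG and TD3. A generic sketch of that polyak update, a standard formulation rather than mushroom_rl's internal code:

import torch

def soft_update(target_net, online_net, tau):
    # target <- (1 - tau) * target + tau * online, applied parameter-wise.
    with torch.no_grad():
        for t_p, p in zip(target_net.parameters(), online_net.parameters()):
            t_p.mul_(1.0 - tau).add_(tau * p)

online, target = torch.nn.Linear(3, 1), torch.nn.Linear(3, 1)
soft_update(target, online, tau=0.001)

With tau this small, the target network trails the online network slowly, which is what stabilizes the bootstrapped critic targets.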
PedromfRibeiro/AA2-Embeddings | [
"9d186fe2b8ed7da2cb150dbbf533ed0027aefc6d"
]
| [
"DeepMol/src/parameterOptimization/HyperparameterOpt.py"
]
| [
"\"\"\"Hyperparameter Optimization Class\"\"\"\n\nimport sklearn\nfrom models.Models import Model\nfrom models.sklearnModels import SklearnModel\nfrom models.kerasModels import KerasModel\nfrom metrics.Metrics import Metric\nfrom Datasets.Datasets import Dataset\nfrom typing import Dict, Any, Optional, Tuple\nfrom functools import reduce\nfrom operator import mul\nimport itertools\nimport collections\nimport numpy as np\nimport random\nimport shutil\nimport tempfile\nimport os\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold\nfrom keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor\n\n\n\ndef _convert_hyperparam_dict_to_filename(hyper_params: Dict[str, Any]) -> str:\n \"\"\"Function that converts a dictionary of hyperparameters to a string that can be a filename.\n Parameters\n ----------\n hyper_params: Dict\n Maps string of hyperparameter name to int/float/string/list etc.\n Returns\n -------\n filename: str\n A filename of form \"_key1_value1_value2_..._key2...\"\n \"\"\"\n filename = \"\"\n keys = sorted(hyper_params.keys())\n for key in keys:\n filename += \"_%s\" % str(key)\n value = hyper_params[key]\n if isinstance(value, int):\n filename += \"_%s\" % str(value)\n elif isinstance(value, float):\n filename += \"_%f\" % value\n else:\n filename += \"%s\" % str(value)\n return filename\n\n\ndef validate_metrics(metrics):\n '''Validate single and multi metrics'''\n if isinstance(metrics, dict):\n all_metrics = []\n for m in metrics.values():\n if m in sklearn.metrics.SCORERS.keys() or isinstance(m, sklearn.metrics._scorer._PredictScorer):\n all_metrics.append(m)\n else:\n print(m, ' is not a valid scoring function. Use sorted(sklearn.metrics.SCORERS.keys()) '\n 'to get valid options.')\n else:\n if metrics in sklearn.metrics.SCORERS.keys() or isinstance(metrics, sklearn.metrics._scorer._PredictScorer):\n all_metrics = metrics\n else:\n print('WARNING: ', metrics, ' is not a valid scoring function. '\n 'Use sorted(sklearn.metrics.SCORERS.keys()) to get valid options.')\n\n if not metrics:\n metrics = 'accuracy'\n print('Using accuracy instead and ', metrics, ' on validation!\\n \\n')\n\n return metrics\n\n\nclass HyperparamOpt(object):\n \"\"\"Abstract superclass for hyperparameter search classes.\n \"\"\"\n\n def __init__(self, model_builder: Model, mode: str = None):\n \"\"\"Initialize Hyperparameter Optimizer.\n Note this is an abstract constructor which should only be used by subclasses.\n\n Parameters\n ----------\n model_builder: constructor function.\n This parameter must be constructor function which returns an\n object which is an instance of `Models`. This function\n must accept two arguments, `model_params` of type `dict` and\n 'model_dir', a string specifying a path to a model directory.\n \"\"\"\n if self.__class__.__name__ == \"HyperparamOpt\":\n raise ValueError(\"HyperparamOpt is an abstract superclass and cannot be directly instantiated. \"\n \"You probably want to instantiate a concrete subclass instead.\")\n self.model_builder = model_builder\n self.mode = mode\n\n\n def hyperparam_search(self,\n params_dict: Dict[str, Any],\n train_dataset: Dataset,\n valid_dataset: Dataset,\n metric: Metric,\n use_max: bool = True,\n logdir: Optional[str] = None,\n **kwargs) -> Tuple[Model, Dict[str, Any], Dict[str, float]]:\n \"\"\"Conduct Hyperparameter search.\n This method defines the common API shared by all hyperparameter\n optimization subclasses. 
Different classes will implement\n different search methods but they must all follow this common API.\n\n Parameters\n ----------\n params_dict: Dict\n Dictionary mapping strings to values. Note that the\n precise semantics of `params_dict` will change depending on the\n optimizer that you're using.\n train_dataset: Dataset\n dataset used for training\n valid_dataset: Dataset\n dataset used for validation(optimization on valid scores)\n metric: Metric\n metric used for evaluation\n use_max: bool, optional\n If True, return the model with the highest score.\n logdir: str, optional\n The directory in which to store created models. If not set, will\n use a temporary directory.\n Returns\n -------\n Tuple[`best_model`, `best_hyperparams`, `all_scores`]\n \"\"\"\n raise NotImplementedError\n\n\nclass HyperparamOpt_Valid(HyperparamOpt):\n \"\"\"\n Provides simple grid hyperparameter search capabilities.\n This class performs a grid hyperparameter search over the specified\n hyperparameter space.\n \"\"\"\n\n def hyperparam_search(self,\n params_dict: Dict,\n train_dataset: Dataset,\n valid_dataset: Dataset,\n metric: Metric,\n n_iter_search: int = 15,\n n_jobs: int = 1,\n verbose: int = 0,\n use_max: bool = True,\n logdir: Optional[str] = None,\n **kwargs):\n\n \"\"\"Perform hyperparams search according to params_dict.\n Each key to hyperparams_dict is a model_param. The values should\n be a list of potential values for that hyperparam.\n Parameters\n ----------\n params_dict: Dict\n Maps hyperparameter names (strings) to lists of possible\n parameter values.\n train_dataset: Dataset\n dataset used for training\n valid_dataset: Dataset\n dataset used for validation(optimization on valid scores)\n metric: Metric\n metric used for evaluation\n n_iter_search: int or None, optional\n Number of random combinations of parameters to test, if None performs complete grid search\n use_max: bool, optional\n If True, return the model with the highest score.\n logdir: str, optional\n The directory in which to store created models. 
If not set, will\n use a temporary directory.\n Returns\n -------\n Tuple[`best_model`, `best_hyperparams`, `all_scores`]\n \"\"\"\n\n if self.mode is None:\n #TODO: better way of doint this\n if len(set(train_dataset.y)) > 2:\n model = KerasRegressor(build_fn=self.model_builder, **kwargs)\n self.mode = 'regression'\n else:\n model = KerasClassifier(build_fn=self.model_builder, **kwargs)\n self.mode = 'classification'\n elif self.mode == 'classification':\n model = KerasClassifier(build_fn=self.model_builder, **kwargs)\n elif self.mode == 'regression':\n model = KerasRegressor(build_fn=self.model_builder, **kwargs)\n else : raise ValueError('Model operation mode can only be classification or regression!')\n\n print('MODE: ', self.mode)\n hyperparams = params_dict.keys()\n hyperparam_vals = params_dict.values()\n for hyperparam_list in params_dict.values():\n assert isinstance(hyperparam_list, collections.Iterable)\n\n number_combinations = reduce(mul, [len(vals) for vals in hyperparam_vals])\n\n if use_max:\n best_validation_score = -np.inf\n else:\n best_validation_score = np.inf\n\n # To make sure that the number of iterations is lower or equal to the number of max hypaparameter combinations\n len_params = sum(1 for x in itertools.product(*params_dict.values()))\n if n_iter_search is None or len_params < n_iter_search:\n n_iter_search = len_params\n random_inds = random.sample(range(0, len_params), k=n_iter_search)\n best_hyperparams = None\n best_model, best_model_dir = None, None\n all_scores = {}\n j = 0\n print(\"Fitting %d random models from a space of %d possible models.\" % (\n len(random_inds), number_combinations))\n for ind, hyperparameter_tuple in enumerate(itertools.product(*hyperparam_vals)):\n if ind in random_inds:\n j += 1\n model_params = {}\n print(\"Fitting model %d/%d\" % (j, len(random_inds)))\n hyper_params = dict(zip(hyperparams, hyperparameter_tuple))\n for hyperparam, hyperparam_val in zip(hyperparams, hyperparameter_tuple):\n model_params[hyperparam] = hyperparam_val\n print(\"hyperparameters: %s\" % str(model_params))\n\n if logdir is not None:\n model_dir = os.path.join(logdir, str(ind))\n print(\"model_dir is %s\" % model_dir)\n try:\n os.makedirs(model_dir)\n except OSError:\n if not os.path.isdir(model_dir):\n print(\"Error creating model_dir, using tempfile directory\")\n model_dir = tempfile.mkdtemp()\n else:\n model_dir = tempfile.mkdtemp()\n\n try :\n model = SklearnModel(self.model_builder(**model_params), model_dir)\n\n except Exception as e:\n model = KerasModel(self.model_builder(**model_params), model_dir)\n\n model.fit(train_dataset)\n\n try:\n model.save()\n except Exception as e:\n print(e)\n\n multitask_scores = model.evaluate(valid_dataset, [metric])\n valid_score = multitask_scores[metric.name]\n hp_str = _convert_hyperparam_dict_to_filename(hyper_params)\n all_scores[hp_str] = valid_score\n\n if (use_max and valid_score >= best_validation_score) or (\n not use_max and valid_score <= best_validation_score):\n best_validation_score = valid_score\n best_hyperparams = hyperparameter_tuple\n if best_model_dir is not None:\n shutil.rmtree(best_model_dir)\n best_model_dir = model_dir\n best_model = model\n else:\n shutil.rmtree(model_dir)\n\n print(\"Model %d/%d, Metric %s, Validation set %s: %f\" % (\n j, len(random_inds), metric.name, j, valid_score))\n print(\"\\tbest_validation_score so far: %f\" % best_validation_score)\n\n if best_model is None:\n print(\"No models trained correctly.\")\n # arbitrarily return last model\n best_model, 
best_hyperparams = model, hyperparameter_tuple\n return best_model, best_hyperparams, all_scores\n\n multitask_scores = best_model.evaluate(train_dataset, [metric])\n train_score = multitask_scores[metric.name]\n print(\"Best hyperparameters: %s\" % str(best_hyperparams))\n print(\"train_score: %f\" % train_score)\n print(\"validation_score: %f\" % best_validation_score)\n return best_model, best_hyperparams, all_scores\n\n\n\nclass HyperparamOpt_CV(HyperparamOpt):\n \"\"\"\n Provides simple grid hyperparameter search capabilities.\n This class performs a grid hyperparameter search over the specified\n hyperparameter space.\n \"\"\"\n\n def hyperparam_search(self,\n model_type: str,\n params_dict: Dict,\n train_dataset: Dataset,\n metric: Metric,\n cv: int = 3,\n n_iter_search: int = 15,\n n_jobs: int = 1,\n verbose: int = 0,\n logdir: Optional[str] = None,\n **kwargs):\n\n \"\"\"Perform hyperparams search according to params_dict.\n Each key to hyperparams_dict is a model_param. The values should\n be a list of potential values for that hyperparam.\n Parameters\n ----------\n model_type: str\n string identifying the type of model (sklearn or keras)\n params_dict: Dict\n Maps hyperparameter names (strings) to lists of possible\n parameter values.\n train_dataset: Dataset\n dataset used for training\n metric: Metric\n metric used for evaluation\n cv: int\n number of folds to perform in the cross validation\n n_iter_search: int or None, optional\n Number of random combinations of parameters to test, if None performs complete grid search\n logdir: str, optional\n The directory in which to store created models. If not set, will\n use a temporary directory.\n Returns\n -------\n Tuple['best_model', 'best_hyperparams', 'all_scores']\n \"\"\"\n # TODO: better way of doing this\n if self.mode is None:\n if len(set(train_dataset.y)) > 2:\n self.mode = 'regression'\n else:\n self.mode = 'classification'\n print('MODEL TYPE: ', model_type)\n #diferentiate sklearn model from keras model\n if model_type == 'keras':\n if self.mode == 'classification':\n model = KerasClassifier(build_fn=self.model_builder, **kwargs)\n\n elif self.mode == 'regression':\n model = KerasRegressor(build_fn=self.model_builder, **kwargs)\n\n else : raise ValueError('Model operation mode can only be classification or regression!')\n\n elif model_type == 'sklearn':\n model = self.model_builder()\n #model = SklearnModel(self.model_builder, self.mode)\n\n else : raise ValueError('Only keras and sklearn models are accepted.')\n\n metrics = validate_metrics(metric)\n\n number_combinations = reduce(mul, [len(vals) for vals in params_dict.values()])\n if number_combinations > n_iter_search:\n print(\"Fitting %d random models from a space of %d possible models.\" % (n_iter_search, number_combinations))\n if self.mode == 'classification':\n grid = RandomizedSearchCV(estimator = model, param_distributions = params_dict,\n scoring = metrics, n_jobs=n_jobs, cv=StratifiedKFold(n_splits=cv),\n verbose=verbose, n_iter = n_iter_search, refit=True)\n\n else: grid = RandomizedSearchCV(estimator = model, param_distributions = params_dict,\n scoring = metrics, n_jobs=n_jobs, cv=cv,\n verbose=verbose, n_iter = n_iter_search, refit=True)\n else :\n if self.mode == 'classification':\n grid = GridSearchCV(estimator = model, param_grid = params_dict,\n scoring = metrics, n_jobs=n_jobs, cv=StratifiedKFold(n_splits=cv), verbose=verbose,\n refit=True)\n\n else: grid = RandomizedSearchCV(estimator = model, param_distributions = params_dict,\n scoring = 
metrics, n_jobs=n_jobs, cv=cv,\n verbose=verbose, n_iter = n_iter_search, refit=True)\n\n #print(train_dataset.X.shape, train_dataset.X.shape[0]/cv)\n grid_result = grid.fit(train_dataset.X, train_dataset.y)\n print(grid_result.best_estimator_)\n print(\"\\n \\n Best %s: %f using %s\" % (metrics, grid_result.best_score_, grid_result.best_params_))\n means = grid_result.cv_results_['mean_test_score']\n stds = grid_result.cv_results_['std_test_score']\n params = grid_result.cv_results_['params']\n for mean, stdev, param in zip(means, stds, params):\n print(\"\\n %s: %f (%f) with: %r \\n\" % (metrics, mean, stdev, param))\n\n if model_type == 'keras':\n best_model = KerasModel(self.model_builder, self.mode, **grid_result.best_params_)\n print('Fitting best model!')\n best_model.fit(train_dataset)\n return best_model, grid_result.best_params_, grid_result.cv_results_\n elif model_type == 'sklearn':\n print(grid_result.best_estimator_)\n return SklearnModel(grid_result.best_estimator_, mode=self.mode), grid_result.best_params_, grid_result.cv_results_\n\n"
]
| [
[
"sklearn.metrics.SCORERS.keys",
"sklearn.model_selection.StratifiedKFold",
"sklearn.model_selection.RandomizedSearchCV"
]
]
|
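HyperparamOpt_CV above is essentially a dispatcher around scikit-learn's searchers, picking RandomizedSearchCV or GridSearchCV and a StratifiedKFold splitter for classification. A self-contained sketch of the equivalent direct call it ends up making:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold

X, y = make_classification(n_samples=200, random_state=0)
params = {"n_estimators": [50, 100, 200], "max_depth": [None, 4, 8]}
search = RandomizedSearchCV(RandomForestClassifier(random_state=0),
                            param_distributions=params, n_iter=5,
                            scoring="accuracy",
                            cv=StratifiedKFold(n_splits=3),
                            refit=True, random_state=0)
search.fit(X, y)
print(search.best_params_, search.best_score_)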
alealv/glow-tts-train | [
"3e16feedffa07d1c4c4a80ac8c4769d17ace48f2"
]
| [
"glow_tts_train/export_onnx.py"
]
| [
"#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom pathlib import Path\n\nimport torch\n\nfrom .checkpoint import load_checkpoint\nfrom .config import TrainingConfig\n\n_LOGGER = logging.getLogger(\"glow_tts_train.export_onnx\")\n\nOPSET_VERSION = 12\n\n# -----------------------------------------------------------------------------\n\n\ndef main():\n \"\"\"Main entry point\"\"\"\n parser = argparse.ArgumentParser(prog=\"glow-tts-export-onnx\")\n parser.add_argument(\"checkpoint\", help=\"Path to model checkpoint (.pth)\")\n parser.add_argument(\"output\", help=\"Path to output directory\")\n parser.add_argument(\n \"--config\", action=\"append\", help=\"Path to JSON configuration file(s)\"\n )\n parser.add_argument(\n \"--debug\", action=\"store_true\", help=\"Print DEBUG messages to the console\"\n )\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n _LOGGER.debug(args)\n\n # -------------------------------------------------------------------------\n\n # Convert to paths\n if args.config:\n args.config = [Path(p) for p in args.config]\n\n args.checkpoint = Path(args.checkpoint)\n args.output = Path(args.output)\n\n # Load configuration\n config = TrainingConfig()\n if args.config:\n _LOGGER.debug(\"Loading configuration(s) from %s\", args.config)\n config = TrainingConfig.load_and_merge(config, args.config)\n\n # Load checkpoint\n _LOGGER.debug(\"Loading checkpoint from %s\", args.checkpoint)\n checkpoint = load_checkpoint(args.checkpoint, config)\n model = checkpoint.model\n\n _LOGGER.info(\n \"Loaded checkpoint from %s (global step=%s)\",\n args.checkpoint,\n checkpoint.global_step,\n )\n\n # Inference only\n model.eval()\n\n # Do not calcuate jacobians for fast decoding\n with torch.no_grad():\n model.decoder.store_inverse()\n\n old_forward = model.forward\n\n def infer_forward(text, text_lengths, scales):\n noise_scale = scales[0]\n length_scale = scales[1]\n (mel, mel_lengths, *_), _, _ = old_forward(\n text,\n text_lengths,\n gen=True,\n noise_scale=noise_scale,\n length_scale=length_scale,\n )\n\n return (mel, mel_lengths)\n\n model.forward = infer_forward\n\n # Create output directory\n args.output.mkdir(parents=True, exist_ok=True)\n\n # Write config\n with open(args.output / \"config.json\", \"w\") as config_file:\n config.save(config_file)\n\n # Create dummy input\n sequences = torch.randint(\n low=0, high=config.model.num_symbols, size=(1, 50), dtype=torch.long\n ).cuda()\n sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()\n scales = torch.FloatTensor([0.667, 1.0])\n\n dummy_input = (sequences, sequence_lengths, scales)\n\n # Export\n torch.onnx.export(\n model,\n dummy_input,\n str(args.output / \"generator.onnx\"),\n opset_version=OPSET_VERSION,\n do_constant_folding=True,\n input_names=[\"input\", \"input_lengths\", \"scales\"],\n output_names=[\"output\"],\n dynamic_axes={\n \"input\": {0: \"batch_size\", 1: \"phonemes\"},\n \"input_lengths\": {0: \"batch_size\"},\n \"output\": {0: \"batch_size\", 1: \"time\"},\n },\n )\n\n _LOGGER.info(\"Exported model to %s\", args.output)\n\n\n# -----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"torch.randint",
"torch.FloatTensor",
"torch.no_grad"
]
]
|
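A hypothetical consumer of the exported file, using onnxruntime with the input names declared in torch.onnx.export above; the output path, the int64/float32 dtypes mirroring the dummy input, and the assumption that the session returns both mel and mel_lengths are illustrative, not taken from this repository:

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession("output/generator.onnx")
text = np.random.randint(0, 100, size=(1, 50)).astype(np.int64)
feeds = {"input": text,
         "input_lengths": np.array([50], dtype=np.int64),
         "scales": np.array([0.667, 1.0], dtype=np.float32)}
mel, mel_lengths = sess.run(None, feeds)  # None -> return every model output
print(mel.shape, mel_lengths)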
Yulin832/fromage | [
"f6c84d5684ca5abfcc979540bb97cc8f105f963d"
]
| [
"fromage/tests/test_mol_cell_operations.py"
]
| [
"import numpy as np\nfrom pytest import approx\n\n\ndef test_complete_mol(hc1_cell):\n new_mol, new_cell = hc1_cell.complete_mol(0)\n sel = new_mol.select(0)\n assert len(new_mol) == len(sel)\n\n\ndef test_complete_cell(hc1_cell):\n new_cell, new_mols = hc1_cell.complete_cell()\n assert len(new_mols[0]) == 37\n\n\ndef test_supercell(hc1_cell):\n trans = np.array([2, 2, 2])\n new_cell = hc1_cell.supercell(trans)\n assert len(new_cell) == 1184\n\n\ndef test_big_supercell(hc1_cell):\n trans = np.array([3, 3, 3])\n new_cell = hc1_cell.supercell(trans)\n assert len(new_cell) == 3996\n\n\ndef test_centered_supercell(hc1_cell):\n trans = np.array([1, 1, 1])\n new_cell = hc1_cell.centered_supercell(trans)\n assert len(new_cell) == approx(3996)\n\n\ndef test_centered_supercell_alt(hc1_cell):\n trans = np.array([1, 1, 1])\n new_cell = hc1_cell.centered_supercell(trans, from_origin=True)\n assert len(new_cell) == approx(1184)\n\n\ndef test_make_cluster(hc1_cell):\n clust = hc1_cell.make_cluster(10)\n assert len(clust) == 74\n\n\ndef test_confine(hc1_complete_cell):\n conf = hc1_complete_cell.confined()\n assert conf[19].x == approx(10.97339)\n"
]
| [
[
"numpy.array"
]
]
|
jedavis82/scene_labeling | [
"a5e819f801a4fa96a1f4b076fc2049519687b1de"
]
| [
"level_two_utils/clothing.py"
]
| [
"from skfuzzy import control as ctrl\nimport skfuzzy as fuzz\nimport numpy as np\nfrom fuzzy_utils import create_universes_membership_functions\n\n\nclass ClothingRules:\n def __init__(self, show_sim_result=None):\n prox, over, spat = create_universes_membership_functions()\n self.__show_sim_result = show_sim_result\n self.__proximity = prox\n self.__overlap = over\n self.__spatial_relationships = spat\n self.__create_universes_of_discourse()\n self.__create_membership_functions()\n self.__create_worn_clothing_rules()\n self.__create_carried_clothing_rules()\n\n def __create_universes_of_discourse(self):\n self.__worn_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='worn_interaction')\n self.__carried_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='carried_interaction')\n\n def __create_membership_functions(self):\n self.__worn_interaction['Wearing'] = fuzz.trimf(self.__worn_interaction.universe, [0.4, 0.7, 1.0])\n self.__worn_interaction['Not Wearing'] = fuzz.trimf(self.__worn_interaction.universe, [0.0, 0.3, 0.6])\n self.__carried_interaction['Carrying'] = fuzz.trimf(self.__carried_interaction.universe, [0.4, 0.7, 1.0])\n self.__carried_interaction['Not Carrying'] = fuzz.trimf(self.__carried_interaction.universe, [0.0, 0.3, 0.6])\n\n def __create_worn_clothing_rules(self):\n # IF overlap AND very close OR close THEN wearing\n self.__wearing_rule = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Close'] | self.__proximity['Very Close']),\n self.__worn_interaction['Wearing'])\n # IF no overlap THEN not wearing\n # Will need rule for no overlap, and rule for overlap and medium, far, very far\n self.__not_wearing_rule1 = ctrl.Rule(self.__overlap['No Overlap'], self.__worn_interaction['Not Wearing'])\n self.__not_wearing_rule2 = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Medium'] | self.__proximity['Far'] |\n self.__proximity['Very Far']), self.__worn_interaction['Not Wearing'])\n self.__wearing_ctrl = ctrl.ControlSystem([self.__wearing_rule, self.__not_wearing_rule1,\n self.__not_wearing_rule2])\n self.__wearing_sim = ctrl.ControlSystemSimulation(self.__wearing_ctrl, flush_after_run=100)\n\n # clothing items. Go back through and check again after running these rules. Ties and umbrellas for example\n self.__carrying_rule = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Close'] | self.__proximity['Very Close'] |\n self.__proximity['Medium']), self.__carried_interaction['Carrying'])\n self.__not_carrying_rule1 = ctrl.Rule(self.__overlap['No Overlap'],\n self.__carried_interaction['Not Carrying'])\n self.__not_carrying_rule2 = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Very Far'] |\n self.__proximity['Far']), self.__carried_interaction['Not Carrying'])\n self.__carrying_ctrl = ctrl.ControlSystem([self.__carrying_rule, self.__not_carrying_rule1,\n self.__not_carrying_rule2])\n self.__carrying_sim = ctrl.ControlSystemSimulation(self.__carrying_ctrl, flush_after_run=100)\n\n def __create_carried_clothing_rules(self):\n # clothing items. Go back through and check again after running these rules. 
Ties and umbrellas for example\n self.__carrying_rule = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Close'] | self.__proximity['Very Close'] |\n self.__proximity['Medium']), self.__carried_interaction['Carrying'])\n self.__not_carrying_rule1 = ctrl.Rule(self.__overlap['No Overlap'],\n self.__carried_interaction['Not Carrying'])\n self.__not_carrying_rule2 = ctrl.Rule(self.__overlap['Overlap'] &\n (self.__proximity['Very Far'] |\n self.__proximity['Far']), self.__carried_interaction['Not Carrying'])\n self.__carrying_ctrl = ctrl.ControlSystem([self.__carrying_rule, self.__not_carrying_rule1,\n self.__not_carrying_rule2])\n self.__carrying_sim = ctrl.ControlSystemSimulation(self.__carrying_ctrl, flush_after_run=100)\n\n def compute_worn_interaction(self, giou, iou, sr_angle):\n self.__wearing_sim.input['overlap'] = iou\n self.__wearing_sim.input['proximity'] = giou\n self.__wearing_sim.compute()\n wearing_result = self.__wearing_sim.output['worn_interaction']\n if self.__show_sim_result:\n self.__worn_interaction.view(sim=self.__wearing_sim)\n wearing = fuzz.interp_membership(self.__worn_interaction.universe, self.__worn_interaction['Wearing'].mf,\n wearing_result)\n not_wearing = fuzz.interp_membership(self.__worn_interaction.universe,\n self.__worn_interaction['Not Wearing'].mf, wearing_result)\n membership = {'Wearing': wearing, 'Not Wearing': not_wearing}\n ret_label = max(membership, key=membership.get)\n if ret_label == 'Not Wearing':\n return None\n else:\n return ret_label\n\n def compute_carried_interaction(self, giou, iou, sr_angle):\n self.__carrying_sim.input['overlap'] = iou\n self.__carrying_sim.input['proximity'] = giou\n self.__carrying_sim.compute()\n carrying_result = self.__carrying_sim.output['carried_interaction']\n if self.__show_sim_result:\n self.__carried_interaction.view(sim=self.__carrying_sim)\n carrying = fuzz.interp_membership(self.__carried_interaction.universe,\n self.__carried_interaction['Carrying'].mf, carrying_result)\n not_carrying = fuzz.interp_membership(self.__carried_interaction.universe,\n self.__carried_interaction['Not Carrying'].mf, carrying_result)\n membership = {'Carrying': carrying, 'Not Carrying': not_carrying}\n ret_label = max(membership, key=membership.get)\n if ret_label == 'Not Carrying':\n return None\n else:\n return ret_label\n\n def compute_interaction(self, label, dom_cat, sub_cat, giou, iou, sr_angle):\n \"\"\"Use the sub_cat to determine the rule to call\"\"\"\n if sub_cat == 'worn':\n res_label = self.compute_worn_interaction(giou, iou, sr_angle)\n return res_label\n elif sub_cat == 'carried':\n res_label = self.compute_carried_interaction(giou, iou, sr_angle)\n return res_label\n"
]
| [
[
"numpy.arange"
]
]
|
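ClothingRules above builds every rule set from the same four skfuzzy ingredients: universes, triangular membership functions, Rule objects, and a ControlSystemSimulation. A trimmed, self-contained sketch of that pattern with a single antecedent and two rules; the universe bounds and membership points here are illustrative:

import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

overlap = ctrl.Antecedent(np.arange(0.0, 1.1, 0.1), 'overlap')
wearing = ctrl.Consequent(np.arange(0.0, 1.1, 0.1), 'wearing')
overlap['High'] = fuzz.trimf(overlap.universe, [0.5, 1.0, 1.0])
overlap['Low'] = fuzz.trimf(overlap.universe, [0.0, 0.0, 0.5])
wearing['Yes'] = fuzz.trimf(wearing.universe, [0.4, 0.7, 1.0])
wearing['No'] = fuzz.trimf(wearing.universe, [0.0, 0.3, 0.6])

system = ctrl.ControlSystem([
    ctrl.Rule(overlap['High'], wearing['Yes']),
    ctrl.Rule(overlap['Low'], wearing['No']),
])
sim = ctrl.ControlSystemSimulation(system)
sim.input['overlap'] = 0.8
sim.compute()
print(sim.output['wearing'])  # defuzzified score, compared to memberships above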
Nilesh6519/Stock-Sentiment-Analysis | [
"935e62a524d62cf9face43b5875a4ab54b3e5bce"
]
| [
"Stock Sentiment Analysis.py"
]
| [
"# ## Stock Sentiment Analysis using News Headlines\n\nimport pandas as pd\ndf=pd.read_csv('Data.csv', encoding = \"ISO-8859-1\")\ndf.head()\ntrain = df[df['Date'] < '20150101']\ntest = df[df['Date'] > '20141231']\ndata=train.iloc[:,2:27]\ndata.replace(\"[^a-zA-Z]\",\" \",regex=True, inplace=True)\n\n# Renaming column names for ease of access\nlist1= [i for i in range(25)]\nnew_Index=[str(i) for i in list1]\ndata.columns= new_Index\ndata.head(5)\n\n# Convertng headlines to lower case\nfor index in new_Index:\n data[index]=data[index].str.lower()\ndata.head(1)\n\n' '.join(str(x) for x in data.iloc[1,0:25])\n\nheadlines = []\nfor row in range(0,len(data.index)):\n headlines.append(' '.join(str(x) for x in data.iloc[row,0:25]))\nheadlines[0]\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\n\n## implement BAG OF WORDS\ncountvector=CountVectorizer(ngram_range=(2,2))\ntraindataset=countvector.fit_transform(headlines)\n\n# implement RandomForest Classifier\nrandomclassifier=RandomForestClassifier(n_estimators=200,criterion='entropy')\nrandomclassifier.fit(traindataset,train['Label'])\n\n## Predict for the Test Dataset\ntest_transform= []\nfor row in range(0,len(test.index)):\n test_transform.append(' '.join(str(x) for x in test.iloc[row,2:27]))\ntest_dataset = countvector.transform(test_transform)\npredictions = randomclassifier.predict(test_dataset)\n\n## Import library to check accuracy\nfrom sklearn.metrics import classification_report,confusion_matrix,accuracy_score\n\nmatrix=confusion_matrix(test['Label'],predictions)\nprint(matrix)\nscore=accuracy_score(test['Label'],predictions)\nprint(score)\nreport=classification_report(test['Label'],predictions)\nprint(report)\n\n"
]
| [
[
"sklearn.metrics.confusion_matrix",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv"
]
]
|
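The core feature step in the notebook above is the bigram bag-of-words. A tiny standalone illustration; get_feature_names_out assumes scikit-learn 1.0+:

from sklearn.feature_extraction.text import CountVectorizer

docs = ["stocks rally on earnings", "stocks fall on earnings fears"]
cv = CountVectorizer(ngram_range=(2, 2))  # bigrams only, as in the notebook
X = cv.fit_transform(docs)
print(cv.get_feature_names_out())  # the learned bigram vocabulary
print(X.toarray())                 # per-document bigram counts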
cm-1/2D-External-Visual-Hulls | [
"579e7d18d048d403b636d326840e5cb2a4e3a3e8"
]
| [
"visHullTwoD.py"
]
| [
"import numpy as np\nimport math\nfrom enum import Enum\nfrom collections import deque\nimport heapq\n\nfrom RedBlackTree.rb_tree import RedBlackTree\n\nEQUAL_THRESHOLD = 0.001 # Threshold for considering certain fp numbers equal below.\nEQUAL_DECIMAL_PLACES = -round(math.log(EQUAL_THRESHOLD, 10))\n\nclass EventType(Enum):\n LEFT = 0\n INTERSECTION = 1\n RIGHT = 2\n \n\nclass SideOfShapeType(Enum):\n UNTESTED = 0\n LEFT = 1\n INSIDE = 2\n RIGHT = 3\n \n \n \n\nclass SweepLine:\n def __init__(self, x, y, eventType):\n self.x = x\n self.y = y\n self.eventType = eventType\n \nclass Vertex:\n def __init__(self, position, outgoingHalfEdge, vertexID = -1):\n self.position = np.array(position)\n self.outgoingHalfEdge = outgoingHalfEdge\n self.vertexID = vertexID\n '''def __eq__(self, other):\n return np.linalg.norm(self.position - other.position) < EQUAL_THRESHOLD and self.vertexID == other.vertexID and self.outgoingHalfEdge == other.outgoingHalfEdge\n def __hash__(self):\n return hash((self.position[0], self.position[1], self.vertexID))'''\n \nclass Face:\n def __init__(self, halfEdge, index):\n self.halfEdge = halfEdge\n self.index = index\n self.visualNumber = -1\n def getCoords(self):\n v = self.halfEdge.headVertex.position\n vertices = [v]\n origHE = self.halfEdge\n he = self.halfEdge.next\n while he != origHE:\n #if he.headVertex is None:\n # break\n v = he.headVertex.position\n vertices.append(v)\n he = he.next\n return np.array(vertices)\n def __eq__(self, other):\n return self.index == other.index\n def __hash__(self):\n return hash(self.index)\n\nclass HalfEdge:\n def __init__(self, index, increasesLeft):\n self.headVertex = None\n self.next = None\n self.prev = None\n self.pair = None\n self.leftFace = None\n self.index = index\n self.increasesLeft = increasesLeft\n\nclass HalfEdgeStructure:\n def __init__(self):\n self.verts = []\n self.halfEdges = {}\n self.faces = {}\n self.halfEdgeIndexCounter = 0\n self.faceIndexCounter = 0\n self._exteriorFaceIndex = -1\n self.vertexOnShape = None\n\n def assignExteriorFace(self, halfEdge):\n if self._exteriorFaceIndex < 0:\n self._exteriorFaceIndex = self.faceIndexCounter\n self.createNewFace(halfEdge)\n halfEdge.leftFace = self.faces[self._exteriorFaceIndex]\n \n def removeFace(self, face):\n if face.index in self.faces:\n del self.faces[face.index]\n\n def removeHalfEdgePair(self, halfEdge):\n index0 = halfEdge.index\n index1 = halfEdge.pair.index\n if index0 > index1:\n index0, index1 = index1, index0\n \n del self.halfEdges[index1]\n del self.halfEdges[index0]\n\n def createNewPairOfHalfEdges(self, vertex, increasesLeft):\n newEdge = HalfEdge(self.halfEdgeIndexCounter, increasesLeft)\n newPair = HalfEdge(self.halfEdgeIndexCounter + 1, not increasesLeft)\n newPair.headVertex = vertex\n \n newEdge.pair = newPair\n newPair.pair = newEdge\n self.halfEdges[self.halfEdgeIndexCounter] = newEdge\n self.halfEdges[self.halfEdgeIndexCounter + 1] = newPair\n self.halfEdgeIndexCounter += 2\n return newEdge\n \n def createNewFace(self, halfEdge):\n newFace = Face(halfEdge, self.faceIndexCounter)\n self.faces[self.faceIndexCounter] = newFace\n self.faceIndexCounter += 1\n return newFace\n \n def isExteriorFace(self, face):\n return face.index == self._exteriorFaceIndex\n \nclass MySweepEvent:\n def __init__(self, x, y, segments, eventType, debugID = -1):\n self.x = x\n self.y = y\n self.segments = segments\n self.eventType = eventType\n self.debugID = debugID\n \n def __repr__(self):\n retStr = \"\"\n if self.eventType == EventType.INTERSECTION:\n retStr = 
\"<{0}, {1}>, segIDS: {2}, {3}. dbID: {4}\".format(self.x, self.y, [s.index for s in self.segments], \"INTERSECTION\", self.debugID) \n else:\n eventStr = \"LEFT\" if self.eventType == EventType.LEFT else \"RIGHT\"\n seg = next(iter(self.segments))\n retStr = \"{0}, segID: {1}, {2}\".format(str(seg), seg.index, eventStr)\n return retStr\n\n def __eq__(self, other):\n xEqual = abs(self.x - other.x) < EQUAL_THRESHOLD\n yEqual = abs(self.y - other.y) < EQUAL_THRESHOLD\n typesEqual = self.eventType == other.eventType\n return xEqual and yEqual and typesEqual\n\n def __lt__(self, other):\n if self.__eq__(other):\n return False\n \n retVal = False\n if self.x < other.x - EQUAL_THRESHOLD:\n retVal = True\n elif abs(self.x - other.x) < EQUAL_THRESHOLD:\n if self.y < other.y - EQUAL_THRESHOLD:\n retVal = True\n elif abs(self.y - other.y) < EQUAL_THRESHOLD:\n if self.eventType.value < other.eventType.value:\n retVal = True\n return retVal\n \n def merge(self, other):\n self.segments = self.segments.union(other.segments)\n return self\n \n def debugEq(self, other):\n ret = {}\n ret[\"debugIDsEq\"] = (self.debugID == other.debugID)\n ret[\"eventsEq\"] = (self.eventType == other.eventType)\n if not ret[\"eventsEq\"]:\n print(\" EventTypeDiff: \", self.eventType, other.eventType)\n \n ret[\"numSegmentsEq\"] = (len(self.segments) == len(other.segments))\n \n ret[\"allSegmentsEq\"] = True\n if ret[\"numSegmentsEq\"]:\n selfSegs = list(self.segments)\n otherSegs = list(other.segments)\n selfSegs.sort()\n otherSegs.sort()\n \n for i in range(len(selfSegs)):\n s = selfSegs[i]\n o = otherSegs[i]\n \n p0IndexEq = (s.p0Index == o.p0Index)\n p1IndexEq = (s.p1Index == o.p1Index)\n activeTypeEq = (s.activeType == o.activeType)\n increaseDirEq = (s.increasesToTheRight == o.increasesToTheRight)\n indexEq = (s.index == o.index)\n \n ret[\"allSegmentsEq\"] = (p0IndexEq and p1IndexEq and activeTypeEq and increaseDirEq and indexEq)\n if not ret[\"allSegmentsEq\"]:\n break\n else:\n ret[\"allSegmentsEq\"] = False\n \n return ret\n \n \nclass SegmentType(Enum):\n A = 1 # The case where the segment is an edge of the polygon\n B = 2 # The case where the two \"tris\" are on the same side.\n C = 3 # The case where the two \"tris\" are on opposite sides.\n D = 4 # A mixed case caused by the \"union\" scenario of verts on same line\n \n\n\n# This is basically a struct for the line intersection algorithm to return.\nclass MyIntersection:\n # meetS is for the first intersection\n # meetT is for the second intersection\n def __init__(self, doMeet, meetS, meetT, meetPt):\n self.doMeet = doMeet\n self.meetS = meetS\n self.meetT = meetT\n self.meetPt = meetPt\n \n\nclass MyLine:\n # By taking x0 and x1 as arguments, can create both lines and segments with same object\n def __init__(self, p0, p1, isSegment):\n self.p0 = np.array(p0)\n self.p1 = np.array(p1)\n self.isSegment = isSegment\n self.length = np.linalg.norm(self.p1 - self.p0)\n \n self.isVertical = abs(p0[0] - p1[0]) < EQUAL_THRESHOLD\n self.m = None\n self.b = None\n if not self.isVertical:\n self.m = (p1[1] - p0[1])/(p1[0] - p0[0])\n self.b = p0[1] - self.m * p0[0]\n \n # Normalized direction vec from p0 to p1\n self.dir = self.p1 - self.p0\n if self.length > 0:\n self.dir = self.dir/self.length\n \n def __repr__(self):\n return \"({0}->{1}), isSegment: {2}\".format(self.p0, self.p1, self.isSegment)\n \n # Math here basically came from setting up an augmented matrix\n # for the 2D case of line intersection and solving it.\n # So commenting may not be the best here.\n def 
intersection (self, other):\n # Delta is the difference in \"P0's\" between lines.\n deltaX = self.p0[0] - other.p0[0]\n deltaY = self.p0[1] - other.p0[1]\n \n # d is the direction vector for self\n dx = self.dir[0]\n dy = self.dir[1]\n # e is the direction vector for the other line\n ex = other.dir[0]\n ey = other.dir[1]\n \n # If lines parallel, there is no (normal) intersection.\n # If they are both infinite lines, they MIGHT infinitely intersect.\n # But in this case, it'll be treated as \"false\".\n # If both lines are segments, they MIGHT intersect at just one point.\n # While it is technically possible for line segments to intersect\n # at an endpoint and also intersect infinitely, that will never\n # happen in the case this function is applied to, so I'm\n # only going to check if exactly one pair of endpoints are equal.\n if abs(dy*ex - ey*dx) < EQUAL_THRESHOLD:\n if self.isSegment and other.isSegment:\n sP0 = self.p0.round(EQUAL_DECIMAL_PLACES)\n sP1 = self.p1.round(EQUAL_DECIMAL_PLACES)\n oP0 = other.p0.round(EQUAL_DECIMAL_PLACES)\n oP1 = other.p1.round(EQUAL_DECIMAL_PLACES)\n endpointEqualities = np.array([np.all(sP0 == oP0), np.all(sP0 == oP1), np.all(sP1 == oP0), np.all(sP1 == oP1)])\n numEqual = np.sum(endpointEqualities)\n if numEqual == 1:\n eqIndex = np.where(endpointEqualities)[0][0]\n s = int(eqIndex > 1) * self.length\n t = int(eqIndex % 2) * other.length\n return MyIntersection(True, s, t, self.p0 + s*self.dir)\n # If none of the above hold, there's no or infinite intersection.\n return MyIntersection(False, 0, 0, (0,0)) \n \n # If dx is 0, we need to switch x and y.\n # Otherwise, we'd be dividing by 0 later on.\n # This change in coordinates won't affect s or t.\n if (abs(dx) < EQUAL_THRESHOLD):\n deltaX, deltaY = deltaY, deltaX\n dx, dy = dy, dx\n ex, ey = ey, ex\n \n # Math checks out here when done on paper, solving augmented matrix.\n t = (dy*deltaX - dx*deltaY)/(dy*ex - ey*dx)\n s = -deltaX/dx + (ex/dx)*t\n \n # Return the struct-like object.\n return MyIntersection(True, s, t, self.p0 + s*self.dir)\n\n \nclass MyActiveLine(MyLine):\n def __init__(self, p0, p1, p0Index, p1Index, activeType, increasesToTheRight):\n super().__init__(p0, p1, True)\n self.p0Index = p0Index\n self.p1Index = p1Index\n self.activeType = activeType\n self.increasesToTheRight = increasesToTheRight\n def __repr__(self):\n return \"({0}->{1}), right+ is {2}\".format(self.p0, self.p1, self.increasesToTheRight)\n def swapDir(self):\n self.p0, self.p1 = self.p1, self.p0\n self.p0Index, self.p1Index = self.p1Index, self.p0Index\n self.increasesToTheRight = not self.increasesToTheRight\n\nclass MySortableSegment(MyActiveLine):\n def __init__(self, activeLine, sweepLine, index):\n super().__init__(\n activeLine.p0,\n activeLine.p1,\n activeLine.p0Index,\n activeLine.p1Index,\n activeLine.activeType,\n activeLine.increasesToTheRight\n )\n self.sweepLine = sweepLine\n self.index = index\n self.node = None\n self.lastIntersectionY = activeLine.p0[1]\n self.forwardHalfEdge = None\n \n def __repr__(self):\n return \"({0}->{1})\".format(self.p0, self.p1)\n \n def currentY(self):\n if self.isVertical:\n return self.lastIntersectionY\n return self.m * self.sweepLine.x + self.b\n\n # Might rethink this at some point.\n # But be careful not to break == usage in tree!\n def __eq__(self, other): \n if other:\n diff0 = np.linalg.norm(self.p0 - other.p0)\n diff1 = np.linalg.norm(self.p1 - other.p1)\n if diff0 < EQUAL_THRESHOLD and diff1 < EQUAL_THRESHOLD:\n return True\n return False\n def __ne__(self, 
other):\n return not self.__eq__(other)\n \n def __lt__(self, other):\n retVal = False\n selfY = self.currentY()\n otherY = other.currentY()\n diff = selfY - otherY\n if abs(diff) < EQUAL_THRESHOLD:\n surpassed = (self.sweepLine.y > selfY + EQUAL_THRESHOLD)\n hereButRemoving = abs(self.sweepLine.y - selfY) < EQUAL_THRESHOLD and self.sweepLine.eventType == EventType.RIGHT\n # Special case where two vertical line segments connect\n if self.isVertical and other.isVertical:\n retVal = self.p1[1] > other.p1[1]\n elif self.isVertical:\n retVal = True\n elif other.isVertical:\n retVal = False\n elif abs(self.m - other.m) < EQUAL_THRESHOLD:\n return self.p0[0] < other.p0[0]\n else:\n retVal = self.m > other.m\n \n if surpassed or hereButRemoving:\n retVal = not retVal\n else:\n retVal = diff < 0\n return retVal\n \n def __le__(self, other):\n return self.__lt__(other) or self.__eq__(other)\n def __gt__(self, other):\n return not self.__le__(other)\n def __ge__(self, other):\n return not self.__lt__(other)\n\n def __hash__(self):\n return hash(self.index) # Maybe return just self.index?\n \nclass MyPolygon:\n def __init__(self, pts):\n npts = np.array(pts)\n \n # Find out whether the polygon is clockwise or counterclockwise.\n # Will be using the technique given in StackOverflow user Beta's answer\n # to user Stécy's question \"How to determine if a list of polygon points are in clockwise order?\"\n # asked on 2009-07-22. Link: https://stackoverflow.com/a/1165943/11295586\n # An explanation is given at: https://web.archive.org/web/20200812125342/https://www.element84.com/blog/determining-the-winding-of-a-polygon-given-as-a-set-of-ordered-points\n # Basically, sum up (x2 − x1)(y2 + y1) over all edges\n # Curve is clockwise iff sum is positive.\n xy0s = npts\n xy1s = np.roll(xy0s, -1, axis=0) # Shift vertices by 1 index\n terms = (xy1s[:, 0] - xy0s[:, 0])*(xy1s[:, 1] + xy0s[:, 1])\n twiceArea = terms.sum() # As described in the links above, the sum is twice the area\n self._isCW = (twiceArea > 0)\n \n # \"Close\" the polygon s.t. 
the first and last vertex are identical.\n # First, check if the first and last points are already the same.\n if not np.all(npts[0] == npts[-1]):\n # If not, add the first pt to the end.\n npts = np.vstack((npts, npts[0, :]))\n self._coords = npts\n \n def changeOrientation(self):\n self._isCW = not self._isCW\n self._coords = np.flip(self._coords, axis=0)\n \n def isClockwise(self):\n return self._isCW\n \n def getCoords(self):\n return np.copy(self._coords) # Look into the shallow/deep copy nature of this at some point!\n \n def getSeparateXYs(self):\n xs = self._coords[:, 0]\n ys = self._coords[:, 1]\n \n return (xs, ys)\n \nclass Scene:\n \n def __init__(self):\n # These can maybe be combined into a dataframe or list of structs at some point.\n self.polygons = []\n self.cwList = []\n \n # In addition to keeping track of individual polygons,\n # we also keep track of ALL vertices in the scene.\n self.vertices = np.empty((0, 2))\n # These can maybe be combined with self.vertices into a dataframe or something\n self.prevIndices = np.empty(0, dtype=np.int)\n self.nextIndices = np.empty(0, dtype=np.int)\n self.polygonIndices = np.empty(0, dtype=np.int)\n \n self.lines = []\n self.activeSegments = []\n \n # Boundaries for the scene.\n self.minX = math.inf\n self.maxX = -math.inf\n \n self.minY = math.inf\n self.maxY = -math.inf\n \n self.partitionMesh = None\n self.drawableFaces = []\n \n self.eventsRecord = []\n\n def resetVisHullCalcs(self):\n self.activeSegments = []\n self.partitionMesh = None\n self.drawableFaces = []\n self.eventsRecord = []\n \n def createActiveSegments(self, index0, index1):\n v00 = self.vertices[self.prevIndices[index0]]\n v01 = self.vertices[index0]\n v02 = self.vertices[self.nextIndices[index0]]\n \n v10 = self.vertices[self.prevIndices[index1]]\n v11 = self.vertices[index1]\n v12 = self.vertices[self.nextIndices[index1]]\n \n cwV0 = self.cwList[self.polygonIndices[index0]]\n \n # If the two vertices form an edge, then it's the first case.\n if self.prevIndices[index0] == index1:\n return [MyActiveLine(v11, v01, index1, index0, SegmentType.A, not cwV0)]\n elif self.nextIndices[index0] == index1:\n return [MyActiveLine(v01, v11, index0, index1, SegmentType.A, not cwV0)]\n\n \n # Otherwise, need to determine which side of the line the two vertices \"triangles\" are on.\n # I'm going to use the inward-pointing bisector of each vertex's angle represent the direction pointing \"inside\" the triangle from the vertex.\n # The reason for using the bisector, rather than just one of the edges, is because\n # it is possible for one of the edges to lie on the line, but the bisector\n # never will.\n \n dir00 = v00 - v01\n dir01 = v02 - v01\n # Make sure both dirs are normalized\n length00 = np.linalg.norm(dir00)\n length01 = np.linalg.norm(dir01)\n dir00 = dir00 / length00\n dir01 = dir01 / length01\n \n dir10 = v10 - v11\n dir11 = v12 - v11\n # Make sure both dirs are normalized\n length10 = np.linalg.norm(dir10)\n length11 = np.linalg.norm(dir11)\n dir10 = dir10 / length10\n dir11 = dir11 / length11\n \n \n # Get the line bisecting the vertex's angle\n unnormedBisect0 = dir00 + dir01\n bisector0 = unnormedBisect0/np.linalg.norm(unnormedBisect0)\n \n \n # Get the line bisecting the vertex's angle\n unnormedBisect1 = dir10 + dir11\n bisector1 = unnormedBisect1/np.linalg.norm(unnormedBisect1)\n\n \n # Then, we will consider a local coordinate frame where the free line is the up vector\n # From this, the \"right\" vector will be [y, -x]\n # The matrix to bring vectors into 
this local coord frame will be:\n # | y -x |\n # | x y | \n up = v11 - v01\n changeBasis = np.array([\n [up[1], -up[0]],\n [up[0], up[1]]\n ])\n \n # We convert the bisectors into this coord frame and see if their x values have the same sign.\n localBisector0 = changeBasis @ bisector0\n localBisector1 = changeBasis @ bisector1\n \n retVals = []\n if localBisector0[0] > 0 and localBisector1[0] > 0:\n retVals = [MyActiveLine(v11, v01, index1, index0, SegmentType.B, True)]\n elif localBisector0[0] < 0 and localBisector1[0] < 0:\n retVals = [MyActiveLine(v01, v11, index0, index1, SegmentType.B, True)]\n else:\n b0, b1 = self.sceneBorderHitPoints(MyLine(v01, v11, False))\n if np.dot((b1 - b0), up) < 0:\n b0, b1 = b1, b0\n \n incToRight = localBisector0[0] < 0\n seg1 = MyActiveLine(b0, v01, -1, index0, SegmentType.C, incToRight)\n seg2 = MyActiveLine(b1, v11, -1, index1, SegmentType.C, incToRight)\n retVals = [seg1, seg2]\n \n return retVals\n \n def addPolygon(self, pts):\n newVertices = np.array(pts, dtype=np.float64)\n\n newPolygon = MyPolygon(pts)\n\n self.polygons.append(newPolygon)\n \n \n self.cwList.append(newPolygon.isClockwise())\n \n \n # Separate the x and y values for the new vertices.\n xs = newVertices[:, 0]\n ys = newVertices[:, 1]\n \n # Get the min/max x & y for this polygon.\n newMinX = xs.min()\n newMaxX = xs.max()\n newMinY = ys.min()\n newMaxY = ys.max()\n \n # Update the world's min/max x & y if necessary.\n if newMinX < self.minX:\n self.minX = newMinX\n if newMaxX > self.maxX:\n self.maxX = newMaxX\n if newMinY < self.minY:\n self.minY = newMinY\n if newMaxY > self.maxY:\n self.maxY = newMaxY\n \n # Update the prevIndices and nextIndices lists\n startIndex = self.vertices.shape[0]\n newPrevIndices = np.roll(np.arange(startIndex, startIndex + newVertices.shape[0]), 1, axis=0)\n newNextIndices = np.roll(np.arange(startIndex, startIndex + newVertices.shape[0]), -1, axis=0)\n self.prevIndices = np.concatenate((self.prevIndices, newPrevIndices))\n self.nextIndices = np.concatenate((self.nextIndices, newNextIndices))\n \n # Update the polygonIndices list\n self.polygonIndices = np.concatenate((self.polygonIndices, np.full(newVertices.shape[0], len(self.polygons) - 1)))\n \n\n # Once that is done, update the vertices list.\n self.vertices = np.concatenate((self.vertices, newVertices))\n \n def addLine(self, p0, p1):\n # At this time, I'm assuming all lines added to the scene are \n # full \"lines\", not \"segments\".\n self.lines.append(MyLine(p0, p1, False))\n \n def calcFreeLines(self):\n self.resetVisHullCalcs()\n vertLineDict = {}\n nonVertLineDict = {}\n for i in range(len(self.vertices) - 1):\n if self.isVertexConcave(i):\n continue\n for j in range(i+1, len(self.vertices)):\n if self.isVertexConcave(j):\n continue\n candidate = MyLine(self.vertices[i], self.vertices[j], False)\n vertDistOnEachAxis = abs(self.vertices[i] - self.vertices[j])\n verticesTouch = np.all(vertDistOnEachAxis < EQUAL_THRESHOLD)\n \n intersectsObj = verticesTouch\n \n vertIntersections = []\n intersectedVertexIndices = set()\n \n polygonCount = 0\n vertCount = 0 # Vertex index of start of current edge analyzed\n while polygonCount < len(self.polygons) and not intersectsObj:\n obj = self.polygons[polygonCount]\n pts = obj.getCoords()\n numPts = len(pts)\n edgeNum = 0 # Like vertCount, but just for this polygon rather than whole scene.\n \n # pts, i.e. 
obj.getCoords(), is organized where the 1st vertex is repeated at the end.\n # Therefore, for the edge between the 1st and last vertices,\n # we don't need to cycle around with a (n + 1)%numPts or anything.\n # The line between the 1st and last vertices is created using the 2nd-last and last array items.\n while edgeNum < numPts-1 and not intersectsObj:\n # Get the two vertices on either side of the line.\n # np.array is used as a vector.\n v0 = np.array(pts[edgeNum])\n v1 = np.array(pts[edgeNum+1])\n edgeLine = MyLine(v0, v1, True)\n intersection = candidate.intersection(edgeLine)\n if intersection.doMeet and intersection.meetT > -EQUAL_THRESHOLD and intersection.meetT < edgeLine.length + EQUAL_THRESHOLD:\n # If the lines intersect, the line and edge/segment probably do...\n intersectsThisTime = True\n # ...but we should test and rule out non-transversal intersections\n # Infinite intersections are already \"discarded\" by the intersection() function.\n # But we need to rule out intersections with a vertex that do not pierce the shape,\n # because these are fine (in fact, they are REQUIRED for the algorithm).\n # We first deal with the line intersecting the vertex at the start of its edge, at v0.\n sideResult = SideOfShapeType.UNTESTED\n if (abs(intersection.meetT) < EQUAL_THRESHOLD):\n # Test if candidate.dir is between both edge dirs going AWAY from v0\n sideResult = self.isLineInsideEdgeAngle(vertCount, candidate.dir)\n intersectsThisTime = (sideResult == SideOfShapeType.INSIDE)\n if vertCount not in intersectedVertexIndices:\n vertIntersection = {\"distAlongLine\": intersection.meetS, \"side\": sideResult}\n vertIntersections.append(vertIntersection)\n intersectedVertexIndices.add(vertCount)\n # Same idea, but for the case where the intersection is at\n # the other side of the edge, closer to v1\n elif (abs(intersection.meetT - edgeLine.length) < EQUAL_THRESHOLD):\n # Test if candidate.dir is between both edge dirs going AWAY from v1\n nextVertIndex = self.nextIndices[vertCount]\n sideResult = self.isLineInsideEdgeAngle(nextVertIndex, candidate.dir)\n intersectsThisTime = (sideResult == SideOfShapeType.INSIDE)\n if nextVertIndex not in intersectedVertexIndices:\n vertIntersection = {\"distAlongLine\": intersection.meetS, \"side\": sideResult}\n vertIntersections.append(vertIntersection)\n intersectedVertexIndices.add(nextVertIndex)\n #if intersectsThisTime:\n # print(candidate.p0, \"->\", candidate.p1, \"intersects\", v0, \"->\", v1, \"meetT:\", intersection.meetT, \"len:\", edgeLine.length)\n intersectsObj = (intersectsObj or intersectsThisTime)\n \n edgeNum += 1\n vertCount += 1\n polygonCount += 1\n \n # Suppose the line grazes multiple vertices, but does not\n # penetrate the shape's interior at all. It may still\n # not really be a line of sight. \n # Consider, for example, a line passing through two vertices\n # touching each other at opposite sides of a line, i.e.\n # something that locally looks like >< with the line passing\n # through vertically. Or a line passing vertically through\n # something that locally looks like:\n # >\n # <\n # >\n # In these cases, the line has no \"room to move/rotate\" to the\n # sides so it does not represent an area of nonzero width\n # through which visual lines can pass. We test for those here.\n # \n # Note that we already partially test for the >< case when\n # we, just after the line creation, check if the two vertices\n # making up the line touch each other. 
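(That is the verticesTouch check performed right after the candidate line is built.) 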
That means that, here,\n # we only need to test cases where 3+ vertices are involved.\n if not intersectsObj and len(vertIntersections) > 2:\n vertIntersections.sort(key = (lambda a: a[\"distAlongLine\"]))\n \n sideSwitchCount = 0\n lineCannotMove = False\n \n prevDist = vertIntersections[0][\"distAlongLine\"]\n prevSide = vertIntersections[0][\"side\"]\n \n for lineVertIndex in range(1, len(vertIntersections)):\n currDist = vertIntersections[lineVertIndex][\"distAlongLine\"]\n currSide = vertIntersections[lineVertIndex][\"side\"]\n \n if currSide != prevSide:\n sideSwitchCount += 1\n # The \"><\" case:\n touchingCase = abs(currDist - prevDist) < EQUAL_THRESHOLD\n \n # The case of\n # >\n # <\n # >\n separatedCase = sideSwitchCount > 1\n \n if touchingCase or separatedCase:\n lineCannotMove = True\n break\n prevDist = currDist\n prevSide = currSide\n \n \n intersectsObj = intersectsObj or lineCannotMove\n \n # All of the intersection testing is finally done.\n # Now, if it didn't intersect, we can create active segments.\n if not intersectsObj:\n newSegments = self.createActiveSegments(i, j) \n \n for newSeg in newSegments:\n\n cKey = round(newSeg.p0[0], EQUAL_DECIMAL_PLACES)\n dictOfInterest = vertLineDict\n if not newSeg.isVertical:\n cKey = (round(newSeg.m, EQUAL_DECIMAL_PLACES), round(newSeg.b, EQUAL_DECIMAL_PLACES))\n dictOfInterest = nonVertLineDict\n\n if cKey in dictOfInterest:\n dictOfInterest[cKey].append(newSeg)\n else:\n dictOfInterest[cKey] = [newSeg]\n \n self.unifySegments(nonVertLineDict, False)\n self.unifySegments(vertLineDict, True)\n self.calculateVisualHull()\n return\n \n \n def unifySegments(self, segmentDictionary, isVertical):\n axisNum = 0\n axisKey = \"x\"\n if isVertical:\n axisNum = 1\n axisKey = \"y\"\n for _, segsToUnify in segmentDictionary.items():\n # Skip over the complex unification process if only one segment.\n if len(segsToUnify) == 1:\n self.activeSegments.append(segsToUnify[0])\n continue\n # Also skip over if it's just two \"type-C\" segments.\n if len(segsToUnify) == 2 and segsToUnify[0].activeType == SegmentType.C and segsToUnify[1].activeType == SegmentType.C:\n self.activeSegments.append(segsToUnify[0])\n self.activeSegments.append(segsToUnify[1])\n continue\n coordsOnLn = []\n for i in range(len(segsToUnify)):\n s = segsToUnify[i]\n if s.p0[axisNum] > s.p1[axisNum]:\n s.swapDir()\n coordsOnLn.append({\"x\": s.p0[0], \"y\": s.p0[1], \"index\": s.p0Index, \"segsStartingHere\": [i]})\n coordsOnLn.append({\"x\": s.p1[0], \"y\": s.p1[1], \"index\": s.p1Index, \"segsStartingHere\": []})\n \n coordsOnLn.sort(key = (lambda a: a[axisKey]))\n \n prevCoord = coordsOnLn[0]\n uniqueCoords = [prevCoord]\n for i in range(1, len(coordsOnLn)):\n coord = coordsOnLn[i]\n if abs(coord[axisKey] - prevCoord[axisKey]) > EQUAL_THRESHOLD:\n uniqueCoords.append(coord)\n else:\n uniqueCoords[-1][\"segsStartingHere\"] += coord[\"segsStartingHere\"]\n prevCoord = coord\n \n intervals = []\n for i in range(len(uniqueCoords) - 1):\n intervals.append( {\"right\": 0, \"left\": 0} )\n \n # This next bit looks O(n^3) at a glance.\n # But keep in mind that each segment only is in \"segsStartingHere\"\n # for one coord, and that the number of possible intervals\n # a segment can span is also limited.\n for i in range(len(uniqueCoords) - 1):\n coord = uniqueCoords[i]\n for sIndex in coord[\"segsStartingHere\"]:\n s = segsToUnify[sIndex]\n intervalIndex = i\n while uniqueCoords[intervalIndex][axisKey] < s.p1[axisNum] - EQUAL_THRESHOLD and intervalIndex < len(intervals):\n if 
s.increasesToTheRight:\n intervals[intervalIndex][\"right\"] += 1\n else:\n intervals[intervalIndex][\"left\"] += 1\n intervalIndex += 1\n for i in range(len(intervals)):\n interval = intervals[i]\n # The cancelling out effect over the interval. So no segment created.\n if interval[\"right\"] > 0 and interval[\"left\"] > 0:\n continue\n # No segment at all over this line\n elif interval[\"right\"] == 0 and interval[\"left\"] == 0:\n continue\n else:\n p0 = (uniqueCoords[i][\"x\"], uniqueCoords[i][\"y\"])\n p1 = (uniqueCoords[i+1][\"x\"], uniqueCoords[i+1][\"y\"])\n p0Index = (uniqueCoords[i][\"index\"])\n p1Index = (uniqueCoords[i+1][\"index\"])\n self.activeSegments.append(MyActiveLine(p0, p1, p0Index, p1Index, SegmentType.D, interval[\"right\"] > 0))\n \n def isLineInsideEdgeAngle(self, vertIndex, dirToTest):\n if self.isVertexConcave(vertIndex):\n return SideOfShapeType.INSIDE\n\n v0 = self.vertices[self.prevIndices[vertIndex]]\n v1 = self.vertices[vertIndex]\n v2 = self.vertices[self.nextIndices[vertIndex]]\n \n dir0 = v0 - v1\n dir1 = v2 - v1\n # Make sure both dirs are normalized\n length0 = np.linalg.norm(dir0)\n length1 = np.linalg.norm(dir1)\n dir0 = dir0 / length0\n dir1 = dir1 / length1\n \n \n # Get the line bisecting the vertex's angle\n unnormedBisect = dir0 + dir1\n bisector = unnormedBisect/np.linalg.norm(unnormedBisect)\n \n # If the dot product of candidate's dir with bisector is less than that\n # of one of the 2 edges of centre vert, then it doesn't go into the polygon\n dotThresh = abs(np.dot(bisector, dir0))\n testDot = abs(np.dot(bisector, dirToTest))\n if testDot <= dotThresh + EQUAL_THRESHOLD:\n # However, we then want to know which side of the shape the line's on.\n # Construct a local coordinate frame.\n # The \"up\" vector will be dirToTest.\n # From this, the \"right\" vector will be [y, -x]\n # The matrix to bring vectors into this local coord frame will be:\n # | y -x |\n # | x y | \n up = dirToTest\n changeBasis = np.array([\n [up[1], -up[0]],\n [up[0], up[1]]\n ])\n # Convert the bisector into this local coordinate frame via matrix mult. \n bisectorLocal = changeBasis @ bisector\n \n if bisectorLocal[0] > 0:\n return SideOfShapeType.LEFT\n else:\n return SideOfShapeType.RIGHT\n return SideOfShapeType.INSIDE\n \n # Take in vertices v0, v1, v2 and whether mesh is counter-clockwise (ccw).\n # Output whether v1 is a concave vertex.\n def isVertexConcave(self, vertIndex):\n v0 = self.vertices[self.prevIndices[vertIndex]]\n v1 = self.vertices[vertIndex]\n v2 = self.vertices[self.nextIndices[vertIndex]]\n cw = self.cwList[self.polygonIndices[vertIndex]]\n \n # Construct a local coordinate frame.\n # The \"up\" vector will be v2 - v1.\n # From this, the \"right\" vector will be [y, -x]\n # The matrix to bring vectors into this local coord frame will be:\n # | y -x |\n # | x y | \n up = v2 - v1\n changeBasis = np.array([\n [up[1], -up[0]],\n [up[0], up[1]]\n ])\n\n # Convert v0 - v1 into this local coordinate frame via matrix mult. 
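In this frame the edge direction v2 - v1 maps onto the positive y-axis, so the side of the edge reduces to the sign of the x-component.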
\n backVecGlobal = v0 - v1\n backVec = changeBasis @ backVecGlobal\n \n # If the x-component of this is == 0, just treat it as convex I guess\n # If the x-component is negative and the polygon is clockwise, then concave\n # Alternatively, if the x-component is positive and the polygon is ccw, then concave.\n # Else, it is convex.\n if (backVec[0] > 0 and not cw) or (backVec[0] < 0 and cw):\n return True\n return False\n \n def sceneBorderHitPoints(self, ln):\n # We want the lines in the scene to be rendered \n # such that they extend all the way to the scene's bounding box.\n # To calculate where intersections with said box occur,\n # the following calculations are done.\n # First, initializing maxes/mins to inf.\n tForwardX = math.inf\n tForwardY = math.inf\n tBackwardX = math.inf\n tBackwardY = math.inf\n # We don't know, in each direction, if we'll hit a vertical\n # or horizontal border first, so we have to test both\n # x and y. \n borderX = 0.1*(self.maxX - self.minX)\n borderY = 0.1*(self.maxY - self.minY)\n forwardXHit = self.maxX + borderX\n forwardYHit = self.maxY + borderY\n backwardXHit = self.minX - borderX\n backwardYHit = self.minY -borderY\n # If the direction vector has a negative component,\n # then \"forward\" along it points to the min borders, not max ones.\n if ln.dir[0] < 0:\n forwardXHit, backwardXHit = backwardXHit, forwardXHit\n if ln.dir[1] < 0:\n forwardYHit, backwardYHit = backwardYHit, forwardYHit\n \n # If the direction vector is not vertical, see where it hits the x borders.\n if ln.dir[0] != 0:\n tForwardX = (forwardXHit - ln.p0[0])/ln.dir[0]\n tBackwardX = (ln.p0[0] - backwardXHit)/ln.dir[0]\n # If the direction vector is not horizontal, see where it hits the y borders.\n if ln.dir[1] != 0:\n tForwardY = (forwardYHit - ln.p0[1])/ln.dir[1]\n tBackwardY = (ln.p0[1] - backwardYHit)/ln.dir[1]\n \n # First hits get chosen.\n tForward = min(tForwardX, tForwardY)\n tBackward = -min(tBackwardX, tBackwardY)\n \n # Endpoints for the lines at these intersections created.\n newP0 = ln.p0 + tBackward*ln.dir\n newP1 = ln.p0 + tForward*ln.dir\n return (newP0, newP1)\n \n def calculateVisualHull(self): \n self.partitionMesh = self.findIntersections()\n \n self.drawableFaces = []\n for f in self.partitionMesh.faces.values():\n if self.partitionMesh.isExteriorFace(f):\n continue\n\n self.drawableFaces.append(f)\n \n print(\"Num of drawable faces:\", len(self.drawableFaces))\n\n # We know that the below vertex is on the shape.\n # Now we need to find out which of its faces has visual number 0.\n # First, we can assume the vertex is convex, else it wouldn't\n # be a part of an active segment processed above.\n # So, the \"bisector\" of its two edges points into the shape.\n # We just need to find two half edges that \"enclose\" it.\n # That would thus mean they also enclose that part of the shape.\n vertOnShape = self.partitionMesh.vertexOnShape\n vertIndex = vertOnShape.vertexID\n v0 = self.vertices[self.prevIndices[vertIndex]]\n v1 = self.vertices[vertIndex]\n v2 = self.vertices[self.nextIndices[vertIndex]]\n \n dir0 = v0 - v1\n dir1 = v2 - v1\n # Make sure both dirs are normalized\n length0 = np.linalg.norm(dir0)\n length1 = np.linalg.norm(dir1)\n dir0 = dir0 / length0\n dir1 = dir1 / length1\n \n \n # Get the line bisecting the vertex's angle\n unnormedBisect = dir0 + dir1\n bisector = unnormedBisect/np.linalg.norm(unnormedBisect)\n def atanPos(coord):\n regAtan = math.atan2(coord[1], coord[0])\n if regAtan < 0:\n regAtan += (2.0 * math.pi)\n return regAtan\n 
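# Walk the faces around vertOnShape until the bisector's angle falls between a face's two bounding edge angles; that face encloses the shape locally and is the starting face for visual number 0.\n 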
bisectorAngle = atanPos(bisector)\n \n startingFace = None\n vertHalfEdge = vertOnShape.outgoingHalfEdge\n while startingFace is None:\n v0Edge = vertHalfEdge\n # Next edge in ccw direction.\n v2Edge = vertHalfEdge.prev.pair\n \n v0 = v0Edge.headVertex.position\n v2 = v2Edge.headVertex.position\n # v1 is the same as in the bisector calculation.\n \n v0Angle = atanPos(v0 - v1)\n v2Angle = atanPos(v2 - v1)\n bisectorAngleCopy = bisectorAngle\n # Case where the v0 to v2 range crosses over 0 radians axis.\n if v2Angle < v0Angle:\n # To fix, \"rotate\" all angles so that v0 lies on 0 radian axis.\n v2Angle += (2.0 * math.pi) - v0Angle\n bisectorAngleCopy += (2.0 * math.pi) - v0Angle\n v0Angle = 0\n # It is now possible that bisectorAngleCopy > 2pi\n # Technically, this only happens if it is between v0 and v2,\n # meaning that we could just set startingFace here.\n # But it's not a huge gain in performance, so I'm leaving it \n # for now.\n if bisectorAngleCopy > (2.0 * math.pi):\n bisectorAngleCopy -= (2.0 * math.pi)\n \n if bisectorAngleCopy > v0Angle and bisectorAngleCopy < v2Angle:\n startingFace = v0Edge.leftFace\n \n vertHalfEdge = vertHalfEdge.pair.next\n \n\n # Now, DFS to assign visual numbers to all faces.\n stack = [{\"face\": startingFace, \"visualNumber\": 0}]\n while len(stack) > 0:\n currFace = stack.pop()\n if currFace[\"face\"].visualNumber < 0:\n vn = currFace[\"visualNumber\"]\n currFace[\"face\"].visualNumber = vn\n halfEdge = currFace[\"face\"].halfEdge\n adjFace = halfEdge.pair.leftFace\n if (not self.partitionMesh.isExteriorFace(adjFace)) and (adjFace in self.drawableFaces):\n vnChange = -1 if halfEdge.increasesLeft else 1\n stack.append({\"face\": adjFace, \"visualNumber\": vn+vnChange})\n \n origHalfEdge = halfEdge\n halfEdge = halfEdge.next\n while halfEdge != origHalfEdge:\n #if halfEdge is None:\n # break\n adjFace = halfEdge.pair.leftFace\n if (not self.partitionMesh.isExteriorFace(adjFace)) and (adjFace in self.drawableFaces):\n vnChange = -1 if halfEdge.increasesLeft else 1\n stack.append({\"face\": adjFace, \"visualNumber\": vn+vnChange})\n halfEdge = halfEdge.next \n \n \n # Sweep line implementation!\n def findIntersections(self):\n t = RedBlackTree()\n \n q = []\n # I'm pretty sure this next line's not needed, but I'm not taking chances right now. 
In a hurry.\n heapq.heapify(q)\n \n sortableSegments = []\n sweepLine = SweepLine(0, 0, EventType.LEFT)\n \n for i in range(len(self.activeSegments)):\n s = self.activeSegments[i]\n pL = s.p0\n pR = s.p1\n shouldSwap = False\n if pR[0] < pL[0] - EQUAL_THRESHOLD:\n shouldSwap = True\n elif abs(pR[0] - pL[0]) < EQUAL_THRESHOLD:\n if pR[1] < pL[1] - EQUAL_THRESHOLD:\n shouldSwap = True\n \n if shouldSwap:\n s.swapDir()\n \n sortableSegment = MySortableSegment(s, sweepLine, i)\n sortableSegments.append(sortableSegment)\n \n lEnd = MySweepEvent(s.p0[0], s.p0[1], {sortableSegment}, EventType.LEFT)\n rEnd = MySweepEvent(s.p1[0], s.p1[1], {sortableSegment}, EventType.RIGHT)\n \n heapq.heappush(q, lEnd)\n heapq.heappush(q, rEnd)\n \n #print(\"\\nq preview:\")\n #print(q[0:5])\n\n intersections = [] \n partitionMesh = HalfEdgeStructure()\n \n eventCount = 0\n \n while len(q) > 0:\n #print(\"\\nEvents:\", eventCount)\n eventCount += 1\n p = heapq.heappop(q)\n #print(\"Event: \", p)\n \n self.eventsRecord.append(p)\n \n # print(\"Intersections({0}):\".format(len(intersections)))\n # for isec in intersections:\n # print(isec, end=\", \")\n # print()\n \n if p.eventType == EventType.INTERSECTION:\n while q[0] == p:\n # print(\"merging:\", [s.index for s in p.segments], \",\", [s.index for s in q[0].segments])\n pToMerge = heapq.heappop(q)\n p.merge(pToMerge)\n # print(\"merged segSet:\", [s.index for s in p.segments])\n sweepLine.x = p.x\n sweepLine.y = p.y\n sweepLine.eventType = p.eventType\n \n if p.eventType == EventType.LEFT:\n s = next(iter(p.segments))\n \n sNode = t.add(s)\n s.node = sNode\n sNode.subscribers.add(s)\n \n succ = t.successor(sNode)\n pred = t.predecessor(sNode)\n if succ:\n succSeg = succ.value\n # print(\"succSeg:\", succSeg)\n succInt = s.intersection(succSeg)\n onFirstSegment = succInt.meetS > -EQUAL_THRESHOLD and succInt.meetS < s.length + EQUAL_THRESHOLD\n onSecondSegment = succInt.meetT > -EQUAL_THRESHOLD and succInt.meetT < succSeg.length + EQUAL_THRESHOLD\n \n if succInt.doMeet and onFirstSegment and onSecondSegment:\n intEvent = MySweepEvent(succInt.meetPt[0], succInt.meetPt[1], {s, succSeg}, EventType.INTERSECTION, eventCount-1)\n # print(\"\\tintEvent:\", intEvent)\n heapq.heappush(q, intEvent)\n \n if pred:\n predSeg = pred.value\n # print(\"predSeg:\", predSeg)\n predInt = s.intersection(predSeg)\n onFirstSegment = predInt.meetS > -EQUAL_THRESHOLD and predInt.meetS < s.length + EQUAL_THRESHOLD\n onSecondSegment = predInt.meetT > -EQUAL_THRESHOLD and predInt.meetT < predSeg.length + EQUAL_THRESHOLD\n \n if predInt.doMeet and onFirstSegment and onSecondSegment:\n intEvent = MySweepEvent(predInt.meetPt[0], predInt.meetPt[1], {s, predSeg}, EventType.INTERSECTION, eventCount-1)\n # print(\"\\tintEvent:\", intEvent)\n heapq.heappush(q, intEvent)\n \n elif p.eventType == EventType.RIGHT:\n s = next(iter(p.segments))\n \n sNode = s.node\n \n pred = t.predecessor(sNode)\n succ = t.successor(sNode)\n \n t.removeGivenNode(sNode)\n \n if (s.forwardHalfEdge is not None) and (s.forwardHalfEdge.headVertex is None): #possibly need to check x coords of pair.headVertex against current x coords?\n halfEdge = s.forwardHalfEdge \n halfEdge.prev.next = halfEdge.pair.next\n halfEdge.pair.next.prev = halfEdge.prev\n \n \n # The only thing on the side of an edge terminating outside of the convex hull\n # Is an exterior face, not an interior one. 
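(We detect such dangling edges by a forward half-edge whose headVertex is still None when the segment's RIGHT event is processed.) 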
So, we delete any non-exterior\n # faces on both sides of this half-edge.\n if not partitionMesh.isExteriorFace(halfEdge.leftFace):\n partitionMesh.removeFace(halfEdge.leftFace)\n if not partitionMesh.isExteriorFace(halfEdge.pair.leftFace):\n partitionMesh.removeFace(halfEdge.pair.leftFace)\n \n partitionMesh.assignExteriorFace(halfEdge.prev)\n partitionMesh.assignExteriorFace(halfEdge.pair.next)\n \n \n partitionMesh.removeHalfEdgePair(halfEdge)\n\n \n if pred and succ:\n predSeg = pred.value\n succSeg = succ.value\n # print(\"predSeg:\", predSeg)\n # print(\"succSeg:\", succSeg)\n newInt = predSeg.intersection(succSeg)\n onFirstSegment = newInt.meetS > -EQUAL_THRESHOLD and newInt.meetS < predSeg.length + EQUAL_THRESHOLD\n onSecondSegment = newInt.meetT > -EQUAL_THRESHOLD and newInt.meetT < succSeg.length + EQUAL_THRESHOLD\n toTheRight = newInt.meetPt[0] > sweepLine.x + EQUAL_THRESHOLD\n onSweepLine = (abs(newInt.meetPt[0] - sweepLine.x) < EQUAL_THRESHOLD)\n higherOnSweepLine = onSweepLine and (newInt.meetPt[1] > sweepLine.y + EQUAL_THRESHOLD)\n if newInt.doMeet and onFirstSegment and onSecondSegment and (toTheRight or higherOnSweepLine):\n intEvent = MySweepEvent(newInt.meetPt[0], newInt.meetPt[1], {predSeg, succSeg}, EventType.INTERSECTION, eventCount-1)\n # print(\"\\tintEvent:\", intEvent)\n heapq.heappush(q, intEvent)\n # for sThing in t.valueList():\n # if not t.isMatchingNodeInTree(sThing.node):\n # print(\"Problem for\", sThing, \"node!!!\")\n \n else: # It's an intersection\n newElem = np.array((p.x, p.y))\n intersections.append(newElem)\n intSegments = deque(sorted(p.segments))\n \n # These segments will \"become\" min and max after the swaps.\n maxSeg = intSegments[0]\n minSeg = intSegments[-1]\n \n # For the face assignment, need to know which line segments\n # exist before and after this intersection.\n extendBeforeInt = []\n extendAfterInt = []\n \n for intSeg in intSegments:\n x0Diff = p.x - intSeg.p0[0]\n y0Diff = p.y - intSeg.p0[1]\n x1Diff = intSeg.p1[0] - p.x\n y1Diff = intSeg.p1[1] - p.y\n\n x0Before = x0Diff > EQUAL_THRESHOLD\n x0Equal = abs(x0Diff) < EQUAL_THRESHOLD\n y0Before = y0Diff > EQUAL_THRESHOLD\n \n x1After = x1Diff > EQUAL_THRESHOLD\n x1Equal = abs(x1Diff) < EQUAL_THRESHOLD\n y1After = y1Diff > EQUAL_THRESHOLD\n \n # Note: Before and After not mutually exclusive here.\n before = x0Before or (x0Equal and y0Before)\n after = x1After or (x1Equal and y1After)\n \n if before and intSeg.forwardHalfEdge:\n extendBeforeInt.append(intSeg)\n if after:\n extendAfterInt.insert(0, intSeg) \n \n # The half-edge data structure will have a vertex\n # at this intersection.\n newVertex = Vertex((p.x, p.y), None)\n\n # We need one vertex that's also an original polygon vertex\n # in order to choose a starting face for the visual number assignments\n if partitionMesh.vertexOnShape is None:\n segIndex = 0\n while newVertex.vertexID < 0 and segIndex < len(extendBeforeInt):\n vertSeg = extendBeforeInt[segIndex]\n if vertSeg.p1Index >= 0 and abs(vertSeg.p1[0] - p.x) < EQUAL_THRESHOLD and abs(vertSeg.p1[1] - p.y) < EQUAL_THRESHOLD:\n newVertex.vertexID = vertSeg.p1Index\n segIndex += 1\n segIndex = 0\n while newVertex.vertexID < 0 and segIndex < len(extendAfterInt):\n vertSeg = extendAfterInt[segIndex]\n if vertSeg.p0Index >= 0 and abs(vertSeg.p0[0] - p.x) < EQUAL_THRESHOLD and abs(vertSeg.p0[1] - p.y) < EQUAL_THRESHOLD:\n newVertex.vertexID = vertSeg.p0Index\n segIndex += 1\n partitionMesh.verts.append(newVertex)\n if newVertex.vertexID >= 0:\n partitionMesh.vertexOnShape = 
newVertex\n \n # Swap segment order in tree\n while len(intSegments) >= 2:\n s0 = intSegments.popleft()\n s1 = intSegments.pop()\n tempNode = s0.node\n s0.node = s1.node\n s1.node = tempNode\n s0.node.subscribers.remove(s1)\n s0.node.subscribers.add(s0)\n s1.node.subscribers.remove(s0)\n s1.node.subscribers.add(s1)\n s0.node.value = s0\n s1.node.value = s1\n \n s0.lastIntersectionY = p.y\n s1.lastIntersectionY = p.y\n \n # print(\"maxSeg:\", maxSeg)\n # print(\"minSeg:\", minSeg)\n \n pred = t.predecessor(minSeg.node)\n succ = t.successor(maxSeg.node)\n \n # For each half-edge that comes before the intersection:\n # - Assign the new vertex as the half-edge's head.\n # - \"Close\" the face created by each consecutive pair\n # of half-edges by connecting said half-edges.\n for i in range(len(extendBeforeInt)):\n preSeg = extendBeforeInt[i]\n preSeg.forwardHalfEdge.headVertex = newVertex\n if i < len(extendBeforeInt) - 1:\n nextHalfEdge = extendBeforeInt[i+1].forwardHalfEdge.pair\n preSeg.forwardHalfEdge.next = nextHalfEdge\n nextHalfEdge.prev = preSeg.forwardHalfEdge\n \n # Create the new half-edges for segments extending\n # past the intersection point.\n newForwardHalfEdges = []\n for i in range(len(extendAfterInt)):\n newForwardHalfEdges.append(partitionMesh.createNewPairOfHalfEdges(newVertex, not extendAfterInt[i].increasesToTheRight))\n \n # Handle the outermost half-edges in the \"fans\" before and/or after the intersection.\n # First two cases only have a fan on one side (before or after), creating a \"corner\".\n # Third case is when there are lines both before and after the intersection.\n if len(extendAfterInt) == 0:\n topForwardHalfEdge = extendBeforeInt[-1].forwardHalfEdge\n bottomBackHalfEdge = extendBeforeInt[0].forwardHalfEdge.pair\n topForwardHalfEdge.next = bottomBackHalfEdge\n bottomBackHalfEdge.prev = topForwardHalfEdge\n newVertex.outgoingHalfEdge = bottomBackHalfEdge\n # If this \"corner\" forms a concave \"dent\" in a region, then\n # the two faces on either side of the corner are actually\n # the same, but will have been created without \"knowing\"\n # that, so they'll currently be two separate ones.\n # Thus, this must be reconciled. 
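(The while loops below do the reconciliation: they walk away from the corner, re-pointing each half-edge's leftFace to the surviving face and collecting the displaced faces in faceSetToRemove.) 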
We'll keep one and\n # replace the other with it.\n faceSetToRemove = set()\n if not partitionMesh.isExteriorFace(topForwardHalfEdge.leftFace):\n halfEdgeToReplaceFaceOn = topForwardHalfEdge\n while halfEdgeToReplaceFaceOn is not None and halfEdgeToReplaceFaceOn.leftFace != bottomBackHalfEdge.leftFace:\n faceSetToRemove.add(halfEdgeToReplaceFaceOn.leftFace)\n halfEdgeToReplaceFaceOn.leftFace = bottomBackHalfEdge.leftFace\n halfEdgeToReplaceFaceOn = halfEdgeToReplaceFaceOn.prev\n else:\n halfEdgeToReplaceFaceOn = bottomBackHalfEdge\n while halfEdgeToReplaceFaceOn is not None and halfEdgeToReplaceFaceOn.leftFace != topForwardHalfEdge.leftFace:\n faceSetToRemove.add(halfEdgeToReplaceFaceOn.leftFace)\n halfEdgeToReplaceFaceOn.leftFace = topForwardHalfEdge.leftFace\n halfEdgeToReplaceFaceOn = halfEdgeToReplaceFaceOn.next\n for faceToRemove in faceSetToRemove:\n partitionMesh.removeFace(faceToRemove)\n\n \n \n elif len(extendBeforeInt) == 0:\n bottomBackHalfEdge = newForwardHalfEdges[0].pair\n topForwardHalfEdge = newForwardHalfEdges[-1]\n bottomBackHalfEdge.next = topForwardHalfEdge\n topForwardHalfEdge.prev = bottomBackHalfEdge\n # Since this is a \"new\" corner, we cannot set the outer\n # half-edges' faces using ones that come from before\n # So, we need to find if we're inside a convex face.\n # To do this, we look at segments \"outside\" this \"fan\"\n # in the tree until we find something or reach the exterior.\n halfEdgePred = pred\n halfEdgeSucc = succ\n isOutsideFaceFound = False\n while halfEdgePred is not None and not isOutsideFaceFound:\n if halfEdgePred.value.forwardHalfEdge is not None:\n sharedFace = halfEdgePred.value.forwardHalfEdge.leftFace\n topForwardHalfEdge.leftFace = sharedFace\n bottomBackHalfEdge.leftFace = sharedFace\n isOutsideFaceFound = True\n else:\n halfEdgePred = t.predecessor(halfEdgePred)\n while halfEdgeSucc is not None and not isOutsideFaceFound:\n if halfEdgeSucc.value.forwardHalfEdge is not None:\n sharedFace = halfEdgeSucc.value.forwardHalfEdge.pair.leftFace\n topForwardHalfEdge.leftFace = sharedFace\n bottomBackHalfEdge.leftFace = sharedFace\n isOutsideFaceFound = True\n else:\n halfEdgeSucc = t.successor(halfEdgeSucc)\n \n if not isOutsideFaceFound:\n partitionMesh.assignExteriorFace(topForwardHalfEdge)\n partitionMesh.assignExteriorFace(bottomBackHalfEdge)\n \n newVertex.outgoingHalfEdge = newForwardHalfEdges[0]\n else:\n bottomHalfEdgeBeforeInt = extendBeforeInt[0].forwardHalfEdge.pair\n topHalfEdgeBeforeInt = extendBeforeInt[-1].forwardHalfEdge\n bottomHalfEdgeAfterInt = newForwardHalfEdges[0].pair\n topHalfEdgeAfterInt = newForwardHalfEdges[-1]\n\n newVertex.outgoingHalfEdge = bottomHalfEdgeBeforeInt\n \n topHalfEdgeBeforeInt.next = topHalfEdgeAfterInt\n topHalfEdgeAfterInt.prev = topHalfEdgeBeforeInt\n \n bottomHalfEdgeAfterInt.next = bottomHalfEdgeBeforeInt\n bottomHalfEdgeBeforeInt.prev = bottomHalfEdgeAfterInt\n \n topHalfEdgeAfterInt.leftFace = topHalfEdgeBeforeInt.leftFace\n bottomHalfEdgeAfterInt.leftFace = bottomHalfEdgeBeforeInt.leftFace\n \n # For each half-edge that comes after the intersection:\n # - Connect consecutive pairs of half-edges together\n # at the intersection point.\n # - Create a new face between each consecutive pair\n # of half-edges.\n\n for i in range(len(extendAfterInt) - 1):\n newForwardHalfEdges[i+1].pair.next = newForwardHalfEdges[i]\n newForwardHalfEdges[i].prev = newForwardHalfEdges[i+1].pair \n \n newFace = partitionMesh.createNewFace(newForwardHalfEdges[i])\n \n newForwardHalfEdges[i].leftFace = newFace\n 
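# The prev pointer set above is edge i+1's pair, so it borders the same new face.\n 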
newForwardHalfEdges[i].prev.leftFace = newFace\n \n # Assign the new forward half-edges to their respective segments.\n for i in range(len(extendAfterInt)):\n extendAfterInt[i].forwardHalfEdge = newForwardHalfEdges[i]\n \n # All of the face handling is now complete.\n # Next, normal line sweep intersection testing continues.\n # We need to check the new max and min segments against their\n # \"outside\" neighbour line segments for new intersections.\n if pred:\n predSeg = pred.value\n # print(\"predSeg:\", predSeg)\n predInt = minSeg.intersection(predSeg)\n onFirstSegment = predInt.meetS > -EQUAL_THRESHOLD and predInt.meetS < minSeg.length + EQUAL_THRESHOLD\n onSecondSegment = predInt.meetT > -EQUAL_THRESHOLD and predInt.meetT < predSeg.length + EQUAL_THRESHOLD\n toTheRight = predInt.meetPt[0] > sweepLine.x + EQUAL_THRESHOLD\n onSweepLine = (abs(predInt.meetPt[0] - sweepLine.x) < EQUAL_THRESHOLD)\n higherOnSweepLine = onSweepLine and (predInt.meetPt[1] > sweepLine.y + EQUAL_THRESHOLD)\n if predInt.doMeet and onFirstSegment and onSecondSegment and (toTheRight or higherOnSweepLine):\n intEvent = MySweepEvent(predInt.meetPt[0], predInt.meetPt[1], {minSeg, predSeg}, EventType.INTERSECTION, eventCount-1)\n if intEvent != p and (intEvent.x - sweepLine.x) > -EQUAL_THRESHOLD:\n # print(\"\\tintEvent:\", intEvent)\n heapq.heappush(q, intEvent)\n if succ:\n succSeg = succ.value\n # print(\"succSeg:\", succSeg)\n succInt = maxSeg.intersection(succSeg)\n onFirstSegment = succInt.meetS > -EQUAL_THRESHOLD and succInt.meetS < maxSeg.length + EQUAL_THRESHOLD\n onSecondSegment = succInt.meetT > -EQUAL_THRESHOLD and succInt.meetT < succSeg.length + EQUAL_THRESHOLD\n toTheRight = succInt.meetPt[0] > sweepLine.x + EQUAL_THRESHOLD\n onSweepLine = (abs(succInt.meetPt[0] - sweepLine.x) < EQUAL_THRESHOLD)\n higherOnSweepLine = onSweepLine and (succInt.meetPt[1] > sweepLine.y + EQUAL_THRESHOLD)\n if succInt.doMeet and onFirstSegment and onSecondSegment and (toTheRight or higherOnSweepLine):\n intEvent = MySweepEvent(succInt.meetPt[0], succInt.meetPt[1], {maxSeg, succSeg}, EventType.INTERSECTION, eventCount-1)\n if intEvent != p and (intEvent.x - sweepLine.x) > -EQUAL_THRESHOLD:\n # print(\"\\tintEvent:\", intEvent)\n heapq.heappush(q, intEvent)\n # t.printTree()\n # print(\"---\")\n # print(t.valueList())\n \n # for sThing in t.valueList():\n # if not t.isMatchingNodeInTree(sThing.node):\n # print(\"Problem for\", sThing, \"node!!!\")\n return partitionMesh\n\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.empty",
"numpy.dot",
"numpy.sum",
"numpy.copy",
"numpy.roll",
"numpy.where",
"numpy.vstack",
"numpy.arange",
"numpy.all",
"numpy.flip"
]
]
|
1364354238/PYTORCH_LEARNING | [
"d7ab877512ab41c80b37ab68bd1a42193916f31c"
]
| [
"chapter10-图像描述(Image Caption)/utils/beam_search.py"
]
| [
"#coding:utf8\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class for generating captions from an image-to-text model.\nAdapted from https://github.com/tensorflow/models/blob/master/im2txt/im2txt/inference_utils/caption_generator.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nfrom torch.autograd import Variable\nfrom torch.nn.functional import log_softmax\nimport heapq\nimport math\n\n\nclass Caption(object):\n \"\"\"Represents a complete or partial caption.\"\"\"\n\n def __init__(self, sentence, state, logprob, score, metadata=None):\n \"\"\"Initializes the Caption.\n\n Args:\n sentence: List of word ids in the caption.\n state: Model state after generating the previous word.\n logprob: Log-probability of the caption.\n score: Score of the caption.\n metadata: Optional metadata associated with the partial sentence. If not\n None, a list of strings with the same length as 'sentence'.\n \"\"\"\n self.sentence = sentence\n self.state = state\n self.logprob = logprob\n self.score = score\n self.metadata = metadata\n\n def __cmp__(self, other):\n \"\"\"Compares Captions by score.\"\"\"\n assert isinstance(other, Caption)\n if self.score == other.score:\n return 0\n elif self.score < other.score:\n return -1\n else:\n return 1\n\n # For Python 3 compatibility (__cmp__ is deprecated).\n def __lt__(self, other):\n assert isinstance(other, Caption)\n return self.score < other.score\n\n # Also for Python 3 compatibility.\n def __eq__(self, other):\n assert isinstance(other, Caption)\n return self.score == other.score\n\n\nclass TopN(object):\n \"\"\"Maintains the top n elements of an incrementally provided set.\"\"\"\n\n def __init__(self, n):\n self._n = n\n self._data = []\n\n def size(self):\n assert self._data is not None\n return len(self._data)\n\n def push(self, x):\n \"\"\"Pushes a new element.\"\"\"\n assert self._data is not None\n if len(self._data) < self._n:\n heapq.heappush(self._data, x)\n else:\n heapq.heappushpop(self._data, x)\n\n def extract(self, sort=False):\n \"\"\"Extracts all elements from the TopN. 
This is a destructive operation.\n\n The only method that can be called immediately after extract() is reset().\n\n Args:\n sort: Whether to return the elements in descending sorted order.\n\n Returns:\n A list of data; the top n elements provided to the set.\n \"\"\"\n assert self._data is not None\n data = self._data\n self._data = None\n if sort:\n data.sort(reverse=True)\n return data\n\n def reset(self):\n \"\"\"Returns the TopN to an empty state.\"\"\"\n self._data = []\n\n\nclass CaptionGenerator(object):\n \"\"\"Class to generate captions from an image-to-text model.\"\"\"\n\n def __init__(self,\n embedder,\n rnn,\n classifier,\n eos_id,\n beam_size=3,\n max_caption_length=20,\n length_normalization_factor=0.0):\n \"\"\"Initializes the generator.\n\n Args:\n model: recurrent model, with inputs: (input, state) and outputs len(vocab) values\n beam_size: Beam size to use when generating captions.\n max_caption_length: The maximum caption length before stopping the search.\n length_normalization_factor: If != 0, a number x such that captions are\n scored by logprob/length^x, rather than logprob. This changes the\n relative scores of captions depending on their lengths. For example, if\n x > 0 then longer captions will be favored.\n \"\"\"\n self.embedder = embedder\n self.rnn = rnn\n self.classifier = classifier\n self.eos_id = eos_id\n self.beam_size = beam_size\n self.max_caption_length = max_caption_length\n self.length_normalization_factor = length_normalization_factor\n\n def beam_search(self, rnn_input, initial_state=None):\n \"\"\"Runs beam search caption generation on a single image.\n\n Args:\n initial_state: An initial state for the recurrent model\n\n Returns:\n A list of Caption sorted by descending score.\n \"\"\"\n\n def get_topk_words(embeddings, state):\n output, new_states = self.rnn(embeddings, state)\n output = self.classifier(output.squeeze(0))\n logprobs = log_softmax(output)\n logprobs, words = logprobs.topk(self.beam_size, 1)\n return words.data, logprobs.data, new_states\n\n partial_captions = TopN(self.beam_size)\n complete_captions = TopN(self.beam_size)\n\n words, logprobs, new_state = get_topk_words(rnn_input, initial_state)\n for k in range(self.beam_size):\n cap = Caption(\n sentence=[words[0, k]],\n state=new_state,\n logprob=logprobs[0, k],\n score=logprobs[0, k])\n partial_captions.push(cap)\n\n # Run beam search.\n for _ in range(self.max_caption_length - 1):\n partial_captions_list = partial_captions.extract()\n partial_captions.reset()\n input_feed = torch.LongTensor([c.sentence[-1]\n for c in partial_captions_list])\n if rnn_input.is_cuda:\n input_feed = input_feed.cuda()\n input_feed = Variable(input_feed, volatile=True)\n state_feed = [c.state for c in partial_captions_list]\n if isinstance(state_feed[0], tuple):\n state_feed_h, state_feed_c = zip(*state_feed)\n state_feed = (torch.cat(state_feed_h, 1),\n torch.cat(state_feed_c, 1))\n else:\n state_feed = torch.cat(state_feed, 1)\n\n embeddings = self.embedder(input_feed).view(1, len(input_feed), -1)\n words, logprobs, new_states = get_topk_words(\n embeddings, state_feed)\n for i, partial_caption in enumerate(partial_captions_list):\n if isinstance(new_states, tuple):\n state = (new_states[0].narrow(1, i, 1),\n new_states[1].narrow(1, i, 1))\n else:\n state = new_states[i]\n for k in range(self.beam_size):\n w = words[i, k]\n sentence = partial_caption.sentence + [w]\n logprob = partial_caption.logprob + logprobs[i, k]\n score = logprob\n if w == self.eos_id:\n if self.length_normalization_factor 
> 0:\n score /= len(sentence)**self.length_normalization_factor\n beam = Caption(sentence, state, logprob, score)\n complete_captions.push(beam)\n else:\n beam = Caption(sentence, state, logprob, score)\n partial_captions.push(beam)\n if partial_captions.size() == 0:\n # We have run out of partial candidates; happens when beam_size\n # = 1.\n break\n\n # If we have no complete captions then fall back to the partial captions.\n # But never output a mixture of complete and partial captions because a\n # partial caption could have a higher score than all the complete\n # captions.\n if not complete_captions.size():\n complete_captions = partial_captions\n\n caps = complete_captions.extract(sort=True)\n\n return [c.sentence for c in caps], [c.score for c in caps]\n"
]
| [
[
"torch.autograd.Variable",
"torch.cat",
"torch.LongTensor",
"torch.nn.functional.log_softmax"
]
]
|
TACJu/Semantic-Coherence | [
"8b377006a28e09dc0beb283ad3f28ac6ff23020b"
]
| [
"han.py"
]
| [
"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nimport keras\nfrom keras import backend as K\nfrom keras import initializers,regularizers,constraints\nfrom keras.models import Model\nfrom keras.engine.topology import Layer\nfrom keras.layers import Dense, Input, Embedding, GRU, Bidirectional, TimeDistributed\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nplt.switch_backend('agg')\n\nclass LossHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = {'batch': [], 'epoch': []}\n self.accuracy = {'batch': [], 'epoch': []}\n self.val_loss = {'batch': [], 'epoch': []}\n self.val_acc = {'batch': [], 'epoch': []}\n\n def on_batch_end(self, batch, logs={}):\n self.losses['batch'].append(logs.get('loss'))\n self.accuracy['batch'].append(logs.get('acc'))\n self.val_loss['batch'].append(logs.get('val_loss'))\n self.val_acc['batch'].append(logs.get('val_acc'))\n\n def on_epoch_end(self, batch, logs={}):\n self.losses['epoch'].append(logs.get('loss'))\n self.accuracy['epoch'].append(logs.get('acc'))\n self.val_loss['epoch'].append(logs.get('val_loss'))\n self.val_acc['epoch'].append(logs.get('val_acc'))\n\n def loss_plot(self, loss_type):\n iters = range(len(self.losses[loss_type]))\n plt.figure()\n plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')\n plt.plot(iters, self.losses[loss_type], 'g', label='train loss')\n if loss_type == 'epoch':\n plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')\n plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')\n plt.grid(True)\n plt.xlabel(loss_type)\n plt.ylabel('acc-loss')\n plt.legend(loc=\"upper right\")\n plt.savefig('./pictures/han_train.png')\n\ndef dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)\n\nclass Attention(Layer):\n def __init__(self,\n W_regularizer=None, u_regularizer=None, b_regularizer=None,\n W_constraint=None, u_constraint=None, b_constraint=None,\n bias=True, **kwargs):\n\n self.supports_masking = True\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.u_regularizer = regularizers.get(u_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.u_constraint = constraints.get(u_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n super(Attention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n\n self.W = self.add_weight((input_shape[-1], input_shape[-1],),\n initializer=self.init,\n name='{}_W'.format(self.name),\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n if self.bias:\n self.b = self.add_weight((input_shape[-1],),\n initializer='zero',\n name='{}_b'.format(self.name),\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n\n self.u = self.add_weight((input_shape[-1],),\n initializer=self.init,\n name='{}_u'.format(self.name),\n regularizer=self.u_regularizer,\n constraint=self.u_constraint)\n\n super(Attention, self).build(input_shape)\n\n def compute_mask(self, input, input_mask=None):\n return None\n\n def call(self, x, mask=None):\n uit = dot_product(x, self.W)\n\n if self.bias:\n uit += self.b\n\n uit = K.tanh(uit)\n ait = dot_product(uit, self.u)\n\n a = K.exp(ait)\n\n if mask is not None:\n a *= K.cast(mask, K.floatx())\n a /= K.cast(K.sum(a, 
axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n a = K.expand_dims(a)\n weighted_input = x * a\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[-1]\n\ndef build_model(embedding_matrix):\n \n embedding_layer = Embedding(86678, 100, weights=[embedding_matrix], input_length=1000, trainable=True, mask_zero=True)\n\n word_input = Input(shape=(1000,), dtype='int32')\n embedded_sequences = embedding_layer(word_input)\n lstm_word = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)\n #word_dense = TimeDistributed(Dense(200))(lstm_word)\n attn_word = Attention()(lstm_word)\n sentenceEncoder = Model(word_input, attn_word)\n\n sentence_input = Input(shape=(1, 1000), dtype='int32')\n sentence_encoder = TimeDistributed(sentenceEncoder)(sentence_input)\n lstm_sentence = Bidirectional(GRU(100, return_sequences=True))(sentence_encoder)\n #sentence_dense = TimeDistributed(Dense(200))(lstm_word)\n attn_sentence = Attention()(lstm_sentence)\n pred = Dense(1, activation='sigmoid')(attn_sentence)\n model = Model(sentence_input, pred)\n\n model.summary()\n\n return model\n\nif __name__ == \"__main__\":\n\n history = LossHistory()\n\n X_train = np.load('./data/word vector/train_index_fix.npy')\n Y_train = np.load('./data/label/train_label.npy')\n X_val = np.load('./data/word vector/valid_index_fix.npy')\n Y_val = np.load('./data/label/valid_label.npy')\n X_train = np.expand_dims(X_train, axis=1)\n X_val = np.expand_dims(X_val, axis=1)\n embedding_matrix = np.load('./data/word vector/word_vector_fix.npy')\n\n model = build_model(embedding_matrix)\n\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n filepath='./model/adam_model/model_{epoch:02d}-{val_acc:.2f}.hdf5'\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)\n tensorboard = TensorBoard('./adam_log/', write_graph=True)\n\n model.fit(X_train, Y_train, validation_data=(X_val, Y_val), callbacks=[checkpoint, tensorboard, history], epochs=10, batch_size=100)\n history.loss_plot('epoch')\n"
]
| [
[
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.expand_dims"
]
]
|
vishalbelsare/hddm | [
"bf10f05c956a590d93ec8f3b162ce60df668c18a"
]
| [
"setup.py"
]
| [
"from setuptools import setup\nfrom setuptools import Extension\n\ntry:\n from Cython.Build import cythonize\n ext_modules = cythonize([Extension('wfpt', ['src/wfpt.pyx'], language='c++'), # uncomment for OSX: , extra_compile_args=['-stdlib=libc++'], extra_link_args=['-stdlib=libc++', \"-mmacosx-version-min=10.9\"]),\n Extension('cdfdif_wrapper', ['src/cdfdif_wrapper.pyx', 'src/cdfdif.c']),\n Extension('data_simulators', ['src/cddm_data_simulation.pyx'], language='c++'),\n ], compiler_directives = {\"language_level\": \"3\"})\n\nexcept ImportError:\n ext_modules = [Extension('wfpt', ['src/wfpt.cpp'], language='c++'),\n Extension('cdfdif_wrapper', ['src/cdfdif_wrapper.c', 'src/cdfdif.c']),\n Extension('data_simulators', ['src/cddm_data_simulation.cpp'], language=\"c++\")\n ]\n\nimport numpy as np\n\nsetup(\n name='HDDM',\n version='0.9.2',\n author='Thomas V. Wiecki, Imri Sofer, Michael J. Frank, Mads Lund Pedersen, Alexander Fengler, Lakshmi Govindarajan',\n author_email='[email protected]',\n url='http://github.com/hddm-devs/hddm',\n packages=['hddm', 'hddm.tests', 'hddm.models', 'hddm.examples', 'hddm.torch', 'hddm.torch_models', 'hddm.simulators'], # 'hddm.cnn', 'hddm.cnn_models', 'hddm.keras_models',\n package_data={'hddm':['examples/*.csv', 'examples/*.conf', 'torch_models/*', 'simulators/*']}, # 'cnn_models/*/*' 'keras_models/*.h5',\n scripts=['scripts/hddm_demo.py'],\n description='HDDM is a python module that implements Hierarchical Bayesian estimation of Drift Diffusion Models.',\n install_requires=['NumPy >=1.6.0', 'SciPy >= 0.6.0', 'pandas >= 0.12.0', 'patsy', 'seaborn >= 0.11.0', 'statsmodels >= 0.12.0', 'tqdm >= 4.1.0', 'scikit-learn >= 0.24', 'cloudpickle >= 2.0.0', 'kabuki >= 0.6.0', 'PyMC >= 2.3.3, < 3.0.0'],\n setup_requires=['NumPy >=1.6.0', 'SciPy >= 0.6.0', 'pandas >= 0.12.0', 'patsy', 'seaborn >= 0.11.0', 'statsmodels >= 0.12.0', 'tqdm >= 4.1.0', 'scikit-learn >= 0.24', 'cloudpickle >= 2.0.0', 'kabuki >= 0.6.0', 'PyMC >= 2.3.3, < 3.0.0'],\n include_dirs = [np.get_include()],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n ext_modules = ext_modules\n)\n"
]
| [
[
"numpy.get_include"
]
]
|
GiuseppeTT/HDRBP | [
"5b7a01a345311ed0fbd2b82e64b955a1ee0ae6e6"
]
| [
"hdrbp/_step.py"
]
| [
"from __future__ import annotations\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\n\nfrom hdrbp.covariance import CovarianceEstimator\nfrom hdrbp.weight import WeightOptimizer\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Step:\n index: int\n data: StepData\n results: list[StepResult]\n\n\n@dataclass\nclass StepData:\n estimation: StepEstimationData\n holding: StepHoldingData\n\n\n@dataclass\nclass StepEstimationData:\n dates: pd.DatetimeIndex\n assets: pd.Index\n returns: np.ndarray\n covariates: Optional[np.ndarray] = None\n\n\n@dataclass\nclass StepHoldingData:\n dates: pd.DatetimeIndex\n assets: pd.Index\n returns: np.ndarray\n covariates: Optional[np.ndarray] = None\n\n\n@dataclass\nclass StepResult:\n estimation: StepEstimationResult\n holding: StepHoldingResult\n\n\n@dataclass\nclass StepEstimationResult:\n covariance_estimator: CovarianceEstimator\n weight_optimizer: WeightOptimizer\n covariances: np.ndarray\n weights: np.ndarray\n\n\n@dataclass\nclass StepHoldingResult:\n covariance_estimator: CovarianceEstimator\n weight_optimizer: WeightOptimizer\n weights: list[np.ndarray]\n returns: np.ndarray\n\n\ndef parse_steps(steps: list[Step]) -> pd.DataFrame:\n logger.debug(\"Step: Parsing steps\")\n\n results = _parse_steps(steps)\n results = _join_results(results)\n results = _rearrange_results(results)\n results = _clean_results(results)\n\n return results\n\n\ndef _parse_steps(steps):\n parsed_results = []\n for step in steps:\n index = step.index\n data = step.data\n logger.info(f\"Step: Parsing step {index}\")\n\n for result in step.results:\n covariance_estimator = result.estimation.covariance_estimator\n weight_optimizer = result.estimation.weight_optimizer\n logger.debug(f\"Step: Parsing result {covariance_estimator=} and {weight_optimizer=}\")\n\n estimation_parse = _parse_step_estimation(index, data.estimation, result.estimation)\n holding_parse = _parse_step_holding(index, data.holding, result.holding)\n\n parsed_result = pd.concat((estimation_parse, holding_parse))\n parsed_results.append(parsed_result)\n\n return parsed_results\n\n\ndef _parse_step_estimation(index, data, result):\n parse = {\n \"covariance_estimator\": [repr(result.covariance_estimator)],\n \"weight_optimizer\": [repr(result.weight_optimizer)],\n \"step\": [index],\n \"date\": [data.dates.max()],\n \"is_rebalance\": [True],\n \"rebalance_assets\": [data.assets],\n \"covariances\": [result.covariances],\n \"rebalance_weights\": [result.weights],\n }\n parse = pd.DataFrame(parse)\n\n return parse\n\n\ndef _parse_step_holding(index, data, result):\n date_count = data.dates.size\n parse = {\n \"covariance_estimator\": date_count * [repr(result.covariance_estimator)],\n \"weight_optimizer\": date_count * [repr(result.weight_optimizer)],\n \"step\": date_count * [index],\n \"date\": data.dates,\n \"is_rebalance\": date_count * [False],\n \"holding_assets\": date_count * [data.assets],\n \"holding_weights\": result.weights,\n \"return\": result.returns,\n }\n parse = pd.DataFrame(parse)\n\n return parse\n\n\ndef _join_results(results):\n # The duplicated groups come from a step's holding parse and the next\n # step's estimation parse. The pandas.core.groupby.GroupBy.last() method is\n # a workaround that relies on the order that parsed results are\n # concatenated and the automatic NaN dropping. 
Its current effect is\n # merging those duplicated groups so that the joined parsed results make\n # sense.\n\n logger.debug(\"Step: Joining parsed results\")\n\n results = pd.concat(results)\n results = (\n results.groupby([\"covariance_estimator\", \"weight_optimizer\", \"date\"]).last().reset_index()\n )\n\n return results\n\n\ndef _rearrange_results(results):\n logger.debug(\"Step: Rearranging joined results\")\n\n results[\"assets\"] = results.agg(_add_assets, axis=\"columns\")\n results[\"before_rebalance_assets\"] = results.agg(_add_before_rebalance_assets, axis=\"columns\")\n results[\"weights\"] = results.agg(_add_weights, axis=\"columns\")\n results[\"before_rebalance_weights\"] = results.agg(_add_before_rebalance_weights, axis=\"columns\")\n\n final_columns = [\n \"covariance_estimator\",\n \"weight_optimizer\",\n \"step\",\n \"date\",\n \"is_rebalance\",\n \"assets\",\n \"before_rebalance_assets\",\n \"covariances\",\n \"weights\",\n \"before_rebalance_weights\",\n \"return\",\n ]\n\n results = results[final_columns]\n\n return results\n\n\ndef _add_assets(row):\n if row[\"rebalance_assets\"] is not None:\n return row[\"rebalance_assets\"]\n else:\n return row[\"holding_assets\"]\n\n\ndef _add_before_rebalance_assets(row):\n if row[\"rebalance_assets\"] is not None:\n return row[\"holding_assets\"]\n else:\n return None\n\n\ndef _add_weights(row):\n if row[\"rebalance_weights\"] is not None:\n return row[\"rebalance_weights\"]\n else:\n return row[\"holding_weights\"]\n\n\ndef _add_before_rebalance_weights(row):\n if row[\"rebalance_weights\"] is not None:\n return row[\"holding_weights\"]\n else:\n return None\n\n\ndef _clean_results(results):\n type_map = {\n \"covariance_estimator\": \"string\",\n \"weight_optimizer\": \"string\",\n }\n results = results.astype(type_map)\n\n return results\n"
]
| [
[
"pandas.DataFrame",
"pandas.concat"
]
]
|
AlvarBer/Persimmon | [
"da08ed854dd0305d7e4684e97ee828acffd76b4d"
]
| [
"persimmon/view/blocks/knnblock.py"
]
| [
"from persimmon.view.pins import OutputPin\nfrom persimmon.view.blocks.block import Block # MYPY HACK\n\nfrom kivy.properties import ObjectProperty\nfrom kivy.lang import Builder\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nBuilder.load_file('persimmon/view/blocks/knnblock.kv')\n\nclass KNNBlock(Block):\n est_out = ObjectProperty()\n\n def function(self):\n self.est_out.val = KNeighborsClassifier()\n"
]
| [
[
"sklearn.neighbors.KNeighborsClassifier"
]
]
|
earthinversion/Dynamic-Time-Warping-based-Hierarchical-Agglomerative-Clustering | [
"c8945abdb24f0fbf292ecca753ba7961d08b75b2"
]
| [
"example_notebooks/dtwhaclustering/leastSquareModeling.py"
]
| [
"\"\"\"\ndtwhaclustering.leastSquareModeling\n------------------------------------\nLeast square modeling of GPS displacements for seasonality, tidal, co-seismic jumps.\n\n:author: Utpal Kumar\n:date: 2021/06\n:copyright: Copyright 2021 Institute of Earth Sciences, Academia Sinica.\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.optimize import least_squares\nimport matplotlib.pyplot as plt\nfrom analysis_support import toYearFraction\nimport matplotlib\nfrom tqdm import tqdm\n# from tqdm.notebook import tqdm\nimport os\nimport concurrent.futures\nimport sys\n\n# default matplotlib parameters\nimport matplotlib\nfont = {'family': 'Times',\n 'weight': 'bold',\n 'size': 22}\n\nmatplotlib.rc('font', **font)\nplt.style.use('ggplot')\n\n# to edit text in Illustrator\n# matplotlib.rcParams['pdf.fonttype'] = 42\n\n\nclass LSQmodules:\n def __init__(self, dUU, sel_eq_file=None, station_loc_file=\"helper_files/stn_loc.txt\", comp=\"U\", figdir=\"LSQOut\", periods=(13.6608, 14.7653, 27.5546, 182.62, 365.26, 18.6)):\n '''\n Perform least square modeling of the time series in dUU\n\n :param dUU: Pandas dataframe containing the time series for the modeling. Should have time information as pandas datetime format in the index\n :param sel_eq_file: File containing the earthquake origin times e.g., `2009,1,15` with header info, e.g. `year_val,month_val,date_val`\n :param station_loc_file: File containing the station location info, e.g., `DAWU,120.89004,22.34059`, with header `stn,lon,lat`\n :param comp: Component name of the data provided\n :param figdir: Output directory for the figures, if requested\n :param periods: Periods for the tidal and seasonal signals. e.g., (13.6608, 14.7653, 27.5546, 182.62, 365.26, 18.6). All in days except 18.6 years.\n '''\n self.dUU = dUU\n self.comp = comp\n # convert time to decimal year\n year = []\n for dd in self.dUU.index:\n year.append(round(toYearFraction(dd), 5))\n\n self.xval = np.array(year)\n\n # Periods in year for removal of tidal and seasonal signals\n yr = periods[4]\n P1 = periods[0]/yr\n P2 = periods[1]/yr\n P3 = periods[2]/yr\n P4 = periods[3]/yr\n P5 = yr/yr\n P6 = periods[5] # in year\n self.periods = np.array([pp for pp in [P1, P2, P3, P4, P5, P6]])\n if sel_eq_file and os.path.exists(sel_eq_file):\n # selected earthquakes for the removal using least squares method\n dftmp = pd.read_csv(sel_eq_file)\n tmp = []\n for yr, mn, dt in zip(dftmp['year_val'].values, dftmp['month_val'].values, dftmp['date_val'].values):\n if yr < 10:\n yr = '0{}'.format(yr)\n if mn < 10:\n mn = '0{}'.format(mn)\n tmp.append('{}-{}-{}'.format(yr, mn, dt))\n # print(tmp)\n evs = pd.DatetimeIndex(tmp) # string to pandas datetimeIndex\n # converting the events to year fraction\n events = []\n for ee in evs:\n ee_frac = round(toYearFraction(ee), 5)\n if self.xval.min() < ee_frac < self.xval.max():\n events.append(ee_frac)\n\n self.events = events\n else:\n self.events = []\n\n # read station information\n self.stnloc = pd.read_csv(station_loc_file, header=None,\n sep='\\s+', names=['stn', 'lon', 'lat'])\n self.stnloc.set_index('stn', inplace=True)\n del events, evs, dftmp, year\n\n self.stn_slope_file = f'stn_slope_res_{self.comp}.txt'\n if os.path.exists(self.stn_slope_file):\n os.remove(self.stn_slope_file)\n\n self.figdir = figdir\n os.makedirs(self.figdir, exist_ok=True)\n\n # defining the jump function\n def jump(self, t, t0):\n '''\n heaviside step function\n\n :param t: time data\n :param t0: earthquake origin time\n '''\n o = np.zeros(len(t))\n ind = 
np.where(t == t0)[0][0]\n        o[ind:] = 1.0\n        return o\n\n    def compute_lsq(self, plot_results=False, remove_trend=True, remove_seasonality=True, remove_jumps=True, plotformat=None):\n        '''\n        Compute the least-squares model using multithreading\n\n        :param plot_results: plot the final results\n        :param remove_trend: return the time series after removing the linear trend\n        :param remove_seasonality: return the time series after removing the seasonal signals\n        :param remove_jumps: return the time series after removing the co-seismic jumps\n        :param plotformat: plot format of the output figure, e.g. \"png\". \"pdf\" by default.\n        '''\n        def all_jumps(t, *cc):\n            '''\n            aggregate all jumps\n            '''\n            if not cc:\n                # no earthquakes selected: contribute nothing to the model\n                return np.zeros(len(t))\n            out = cc[0]*self.jump(t, self.events[0])\n            for idx, ccval in enumerate(cc[1:]):\n                eidx = idx+1\n                out += ccval*self.jump(t, self.events[eidx])\n\n            return out\n\n        # defining the function for the removal of trend, seasonal, tidal and co-seismic signals\n\n        def lsqfun(coeff, t, y):\n            '''\n            least squares function\n            '''\n            return coeff[0] + coeff[1] * t \\\n                + coeff[2] * np.cos(2*np.pi * t/self.periods[0]) + coeff[3] * np.sin(2*np.pi * t/self.periods[0]) \\\n                + coeff[4] * np.cos(2*np.pi * t/self.periods[1]) + coeff[5] * np.sin(2*np.pi * t/self.periods[1]) \\\n                + coeff[6] * np.cos(2*np.pi * t/self.periods[2]) + coeff[7] * np.sin(2*np.pi * t/self.periods[2]) \\\n                + coeff[8] * np.cos(2*np.pi * t/self.periods[3]) + coeff[9] * np.sin(2*np.pi * t/self.periods[3]) \\\n                + coeff[10] * np.cos(2*np.pi * t/self.periods[4]) + coeff[11] * np.sin(2*np.pi * t/self.periods[4]) \\\n                + all_jumps(t, *coeff[12:len(self.events)+12]) - y\n\n        # defining the initial values: 12 base coefficients (offset, trend and five\n        # sinusoid pairs) plus one jump amplitude per event, matching the indexing in lsqfun\n        x0 = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0,\n                       0.0, 0.0, 0.0, 0.0]+[1.0]*len(self.events))\n\n        # function for regenerating the data after removal of signals\n        def gen_data(t, a, b, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, *cc):\n            '''\n            output model of least squares\n            '''\n            y = a + b * t+a1 * np.cos(2*np.pi * t/self.periods[0])+b1 * np.sin(2*np.pi * t/self.periods[0])\\\n                + a2 * np.cos(2*np.pi * t/self.periods[1])+b2 * np.sin(2*np.pi * t/self.periods[1])\\\n                + a3 * np.cos(2*np.pi * t/self.periods[2])+b3 * np.sin(2*np.pi * t/self.periods[2])\\\n                + a4 * np.cos(2*np.pi * t/self.periods[3])+b4 * np.sin(2*np.pi * t/self.periods[3])\\\n                + a5 * np.cos(2*np.pi * t/self.periods[4])+b5 * np.sin(2*np.pi * t/self.periods[4])\\\n                + all_jumps(t, *cc)\n            return y\n\n        def _computelsq(stn, x0, yval1):\n            '''\n            Compute the lsq, and residuals\n            '''\n            try:\n                res_lsq = least_squares(lsqfun, x0, args=(self.xval, yval1))\n                trendU = res_lsq.x[0]+self.xval*res_lsq.x[1]\n\n                seasonalityU = res_lsq.x[2] * np.cos(2*np.pi * self.xval/self.periods[0])+res_lsq.x[3] * np.sin(2*np.pi * self.xval/self.periods[0])+res_lsq.x[4] * np.cos(2*np.pi * self.xval/self.periods[1])+res_lsq.x[5] * np.sin(2*np.pi * self.xval/self.periods[1])+res_lsq.x[6] * np.cos(\n                    2*np.pi * self.xval/self.periods[2])+res_lsq.x[7] * np.sin(2*np.pi * self.xval/self.periods[2])+res_lsq.x[8] * np.cos(2*np.pi * self.xval/self.periods[3])+res_lsq.x[9] * np.sin(2*np.pi * self.xval/self.periods[3])+res_lsq.x[10] * np.cos(2*np.pi * self.xval/self.periods[4])+res_lsq.x[11] * np.sin(2*np.pi * self.xval/self.periods[4])\n\n                jumpsU = all_jumps(\n                    self.xval, *res_lsq.x[12:len(self.events)+12])\n                # copy so the in-place subtractions below do not mutate the raw series yval1\n                residual = yval1.copy()\n                residual_label = \"Residual\"\n\n                if remove_trend or remove_seasonality or remove_jumps:\n                    residual_label += \" after removing\"\n                # remove trend, seasonality, jumps\n                if remove_trend:\n                    residual_label += \" trend\"\n                    residual -= 
trendU\n\n                if remove_seasonality:\n                    residual_label += \" seasonality\"\n                    residual -= seasonalityU\n\n                if remove_jumps:\n                    residual_label += \" jumps\"\n                    residual -= jumpsU\n\n                resdU = np.array(residual)\n                mresdU = np.mean(resdU)\n                stdresdU = np.std(resdU)\n\n                # replacing outliers (> 3 std from the mean) with the mean of the data\n                maxStd = 3\n                final_residual = []\n                for xU in resdU:\n                    if (mresdU - maxStd*stdresdU < xU < mresdU + maxStd*stdresdU):\n                        final_residual.append(xU)\n                    else:\n                        final_residual.append(mresdU)\n\n                with open(self.stn_slope_file, 'a') as ff:\n                    ff.write('{} {:.5f} {:.5f} {:.2f}\\n'.format(\n                        stn, self.stnloc.loc[stn, 'lon'], self.stnloc.loc[stn, 'lat'], res_lsq.x[1]))\n\n            except KeyboardInterrupt:\n                sys.exit()\n\n            return stn, yval1, final_residual, res_lsq, trendU, seasonalityU, jumpsU, residual_label\n\n        tasks = []\n        final_dU = pd.DataFrame(index=self.dUU.index)\n\n        with concurrent.futures.ThreadPoolExecutor() as executor:\n            for i, stn in enumerate(self.dUU.columns):\n                stn = stn.split(\"_\")[0]\n                yval1 = np.array(self.dUU.iloc[:, i])\n                tasks.append(executor.submit(\n                    _computelsq, stn, x0, yval1))\n\n            pbar = tqdm(concurrent.futures.as_completed(tasks))\n            pbar.set_description(f\"Computation started for {self.comp}...\")\n            for tt in pbar:\n                stn, yval1, final_residual, res_lsq, trendU, seasonalityU, jumpsU, residual_label = tt.result()\n                pbar.set_description(f\"Finished LSQ ({self.comp}): {stn}\")\n                colval = f\"{stn}_{self.comp}\"\n                if plot_results:\n                    # plot fig\n                    mindisp, maxdisp = yval1.min(), yval1.max()\n                    pbar.set_description(f\"Plotting LSQ ({self.comp}): {stn}\")\n                    fig, ax = plt.subplots(5, 1, figsize=(10, 6), sharex=True)\n                    ax[0].plot(self.xval, yval1, \"bo\", markersize=1,\n                               label=f\"{stn}_{self.comp}\")\n                    ax[0].plot(self.xval, gen_data(self.xval, *res_lsq.x),\n                               'k', lw=1, label='Least squares fit')\n                    ax[2].set_ylabel(\"Displacement (mm)\", color='k')\n                    ax[0].set_title(stn)\n                    ax[0].legend(loc=2)\n\n                    ax[1].plot(self.xval, trendU, 'k', lw=1,\n                               label='Trend, slope: {:.2f}'.format(res_lsq.x[1]))\n                    # ax[1].set_ylabel(\"Amp\", color='k')\n                    ax[1].legend(loc=2)\n\n                    ax[2].plot(self.xval, seasonalityU, 'k',\n                               lw=0.5, label='Seasonality')\n                    # ax[2].set_ylabel(\"Amp\", color='k')\n                    ax[2].legend(loc=2)\n\n                    ax[3].plot(self.xval, jumpsU, 'k', lw=1,\n                               label='Co-seismic jumps')\n                    # ax[3].set_ylabel(\"Amp\", color='k')\n                    ax[3].legend(loc=2)\n\n                    ax[4].plot(self.xval, final_residual,\n                               'k', lw=0.5, label=residual_label)\n                    # ax[4].set_ylabel(\"Displacement\", color='k')\n                    # ax[4].set_ylabel(\"Amp\", color='k')\n                    ax[4].legend(loc=2)\n\n                    for axx in ax:\n                        axx.set_ylim([mindisp, maxdisp])\n                    if plotformat:\n                        try:\n                            plt.savefig(os.path.join(\n                                self.figdir, f'time_series_{stn}_{self.comp}.{plotformat}'), bbox_inches='tight', dpi=200)\n                        except Exception:\n                            print(sys.exc_info())\n                    else:\n                        plt.savefig(os.path.join(\n                            self.figdir, f'time_series_{stn}_{self.comp}.pdf'), bbox_inches='tight')\n                    plt.close(\"all\")\n\n                final_dU[colval] = final_residual\n                del stn, yval1, final_residual, res_lsq, trendU, seasonalityU, jumpsU\n        return final_dU\n\n\ndef lsqmodeling(dUU, dNN, dEE, stnlocfile, plot_results=True, remove_trend=False, remove_seasonality=True, remove_jumps=False, sel_eq_file=\"helper_files/selected_eqs_new.txt\", figdir=\"LSQOut\"):\n    '''\n    Least-squares modeling for the three component time series\n\n    :param dUU: Vertical component pandas dataframe time series\n    :param dNN: North component pandas dataframe time series\n    :param dEE: East component pandas dataframe time series\n    :param plot_results: plot the final 
results\n :param remove_trend: return the time series after removing the linear trend \n :param remove_seasonality: return the time series after removing the seasonal signals\n :param remove_jumps: return the time series after removing the co-seismic jumps\n :param sel_eq_file: File containing the earthquake origin times e.g., `2009,1,15` with header info, e.g. `year_val,month_val,date_val`\n :param stnlocfile: File containing the station location info, e.g., `DAWU,120.89004,22.34059`, with header `stn,lon,lat`\n :return: Pandas dataframe corresponding to the vertical, north and east components e.g., final_dU, final_dN, final_dE \n '''\n ################################################\n final_dU, final_dN, final_dE = None, None, None\n\n lsqmod_U = LSQmodules(dUU, sel_eq_file=sel_eq_file,\n station_loc_file=stnlocfile, comp=\"U\", figdir=figdir)\n final_dU = lsqmod_U.compute_lsq(\n plot_results=plot_results, remove_trend=remove_trend, remove_seasonality=remove_seasonality, remove_jumps=remove_jumps)\n del lsqmod_U\n\n lsqmod_N = LSQmodules(dNN, sel_eq_file=sel_eq_file,\n station_loc_file=stnlocfile, comp=\"N\", figdir=figdir)\n final_dN = lsqmod_N.compute_lsq(plot_results=plot_results, remove_trend=remove_trend,\n remove_seasonality=remove_seasonality, remove_jumps=remove_jumps)\n del lsqmod_N\n\n lsqmod_E = LSQmodules(dEE, sel_eq_file=sel_eq_file,\n station_loc_file=stnlocfile, comp=\"E\", figdir=figdir)\n final_dE = lsqmod_E.compute_lsq(plot_results=plot_results, remove_trend=remove_trend,\n remove_seasonality=remove_seasonality, remove_jumps=remove_jumps)\n del lsqmod_E\n\n return final_dU, final_dN, final_dE\n"
]
| [
[
"numpy.array",
"numpy.sin",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.mean",
"matplotlib.rc",
"matplotlib.pyplot.subplots",
"numpy.where",
"numpy.std",
"matplotlib.pyplot.style.use",
"numpy.cos",
"pandas.read_csv",
"scipy.optimize.least_squares"
]
]
|
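The compute_lsq routine above leans on one scipy idiom: scipy.optimize.least_squares takes a callable that returns the residual vector (model minus data) and an initial-guess vector x0 whose length fixes the number of fitted coefficients. A minimal sketch of that pattern, assuming synthetic t/y arrays rather than anything from the repository:

    import numpy as np
    from scipy.optimize import least_squares

    t = np.linspace(0.0, 4.0, 200)                    # decimal years (synthetic)
    y = 1.5 + 0.8 * t + 0.3 * np.sin(2 * np.pi * t)   # offset + trend + annual term

    def residual(coeff, t, y):
        # model: offset + linear trend + one annual sinusoid pair
        model = (coeff[0] + coeff[1] * t
                 + coeff[2] * np.sin(2 * np.pi * t)
                 + coeff[3] * np.cos(2 * np.pi * t))
        return model - y

    x0 = np.zeros(4)                                  # one entry per coefficient
    fit = least_squares(residual, x0, args=(t, y))
    print(fit.x)                                      # ~[1.5, 0.8, 0.3, 0.0]

The length of x0 must cover every index used inside the residual callable, which is why the x0 in the code above needs 12 base entries plus one jump amplitude per selected earthquake.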
mjirik/seededitorqt | [
"d20a4e92ed7b462ea571c9d129aa31679d70f63d"
]
| [
"tests/widget_test.py"
]
| [
"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n# import logging\n# logger = logging.getLogger(__name__)\n# from __future__ import print_function\nfrom loguru import logger\nimport pytest\nimport os.path\n\npath_to_script = os.path.dirname(os.path.abspath(__file__))\n\n\n# import funkcí z jiného adresáře\nimport sys\nimport unittest\nimport scipy\nimport numpy as np\npath_to_script = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(path_to_script, \"../src/\"))\n# from nose.plugins.attrib import attr\n\n# from pysegbase import pycut\nimport seededitorqt\nimport seededitorqt.plugin\nimport numpy as np\nfrom PyQt5.QtWidgets import QApplication\n\nimport pytest\n\n\nclass SeedEditorPluginTest(unittest.TestCase):\n # @classmethod\n # def setUpClass(cls):\n # if sys.version_info.major < 3:\n # cls.assertCountEqual = cls.assertItemsEqual\n def test_addplugin(self):\n \"\"\"\n just run editor to see what is new\n Returns:\n \"\"\"\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n # se.exec_()\n # self.assertTrue(False)\n\n # @attr(\"interactive\")\n @pytest.mark.interactive\n # @pytest.mark.slow\n def test_show_editor(self):\n \"\"\"\n just run editor to see what is new\n Returns:\n \"\"\"\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n wg1 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n se.addPlugin(wg1)\n se.exec_()\n # self.assertTrue(False)\n\n # def test_show_draw_and_pickup_seed(self):\n # \"\"\"\n # just run editor to see what is new\n # Returns:\n #\n # \"\"\"\n # app = QApplication(sys.argv)\n # data = (np.random.rand(30,31,32) * 100).astype(np.int)\n # data[15:40, 13:20, 10:18] += 50\n # se = seededitorqt.QTSeedEditor(data)\n # se.slice_box.seed_mark = 3 # seed 3\n # se.slice_box.last_position = [1, 3]\n # se.slice_box.drawSeeds([10, 5])\n # se.slice_box.seed_mark = 2 #left mouse button\n # se.slice_box.last_position = [8, 1]\n # se.slice_box.drawSeeds([7, 5])\n # # try to pick up seed from slice\n # se.slice_box._pick_up_seed_label([1, 3])\n # self.assertEqual(se.textFocusedSeedLabel, \"3\", \"Pickuped value\")\n #\n # se.change_focus_seed_label(2)\n # self.assertEqual(se.textFocusedSeedLabel, \"2\", \"Changed value\")\n # # se.exec_()\n # def test_show_draw_and_pickup_segmentation_label(self):\n # \"\"\"\n # just run editor to see what is new\n # Returns:\n #\n # \"\"\"\n # app = QApplication(sys.argv)\n # data = (np.random.rand(30,31,32) * 100).astype(np.int)\n # data[15:40, 13:20, 10:18] += 50\n # segmentation = np.zeros_like(data)\n # segmentation[15:40, 13:20, 10:18] = 1\n # se = seededitorqt.QTSeedEditor(data, contours=segmentation)\n # se.selectSlice(20)\n # se.slice_box.seed_mark = 3 # seed 3\n # se.slice_box.last_position = [1, 3]\n # se.slice_box.drawSeeds([10, 5])\n # se.slice_box.seed_mark = 2 #left mouse button\n # se.slice_box.last_position = [8, 1]\n # se.slice_box.drawSeeds([7, 5])\n # # try to pick up seed from slice\n # se.slice_box._pick_up_segmentation_label([16, 16])\n # # self.assertEqual(se.textFocusedSeedLabel, \"3\", \"Pickuped value\")\n # idx = se.combo_segmentation_label.currentIndex()\n # logger.debug(\"idx {}\".format(idx))\n # self.assertEqual(idx, 1, \"Picked up 
value\")\n #\n # se.change_focus_segmentation_label(0)\n #\n # idx = se.combo_segmentation_label.currentIndex()\n # logger.debug(\"idx {}\".format(idx))\n # self.assertEqual(idx, 0, \"Changed value\")\n # # se.exec_()\n #\n # @attr('interactive')\n # def test_show_editor_with_too_much_wide_data(self):\n # \"\"\"\n # just run editor to see what is new\n # Returns:\n #\n # \"\"\"\n # app = QApplication(sys.argv)\n # data = (np.random.rand(30, 31, 150) * 100).astype(np.int)\n # data[15:40, 13:20, 10:18] += 50\n # se = seededitorqt.QTSeedEditor(data)\n # se.exec_()\n # # @attr('interactive')\n #\n # def test_draw_seed_function(self):\n # \"\"\"\n # just run editor to see what is new\n # Returns:\n #\n # \"\"\"\n # app = QApplication(sys.argv)\n # data = (np.random.rand(30,31,32) * 100).astype(np.int)\n # data[15:40, 13:20, 10:18] += 50\n # se = seededitorqt.QTSeedEditor(data)\n # se.slice_box.seed_mark = 1 #left mouse button\n # se.slice_box.last_position = [1, 3]\n # se.slice_box.drawSeeds([10, 5])\n # se.slice_box.seed_mark = 2 #left mouse button\n # se.slice_box.last_position = [8, 1]\n # se.slice_box.drawSeeds([7, 5])\n # # se.exec_()\n #\n # # @TODO znovu zprovoznit test\n #\n # # @unittest.skip(\"Cekame, az to Tomas opravi\")\n # def make_data(self, sz=32, offset=0, sigma=80):\n # seeds = np.zeros([sz, sz, sz], dtype=np.int8)\n # seeds[offset + 12, offset + 9:offset + 14, offset + 10] = 1\n # seeds[offset + 20, offset + 18:offset + 21, offset + 12] = 1\n # img = np.ones([sz, sz, sz])\n # img = img - seeds\n #\n # seeds[\n # offset + 3:offset + 15,\n # offset + 2:offset + 6,\n # offset + 27:offset + 29] = 2\n # img = scipy.ndimage.morphology.distance_transform_edt(img)\n # segm = img < 7\n # img = (100 * segm + sigma * np.random.random(img.shape)).astype(np.uint8)\n # return img, segm, seeds\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"numpy.random.rand"
]
]
|
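One portability note on the tests above: the np.int alias used with astype was deprecated in NumPy 1.20 and removed in 1.24, so on current NumPy the cast has to use the builtin int (or an explicit width). A tiny illustration on a synthetic array, with no seededitorqt dependency:

    import numpy as np

    data = (np.random.rand(3, 4, 5) * 100).astype(int)   # builtin int works on all NumPy versions
    data64 = data.astype(np.int64)                       # or pin an explicit width
    assert data.dtype.kind == 'i' and data64.dtype == np.int64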
tusharkh/PyGEM-Clone | [
"057d276871d398a3e5dcc8cd59226933a98b3be1"
]
| [
"download_era5_data.py"
]
| [
"\"\"\" Download ERA5 data from the command line \"\"\"\n# You must activate the ERA5_download environment in your command line first\n# Proceed to make sure you have created an account and saved your CDS API key in your home directory\n\n# Built-in libaries\nimport os\n# External libraries\nimport cdsapi\nimport numpy as np\n# Local libraries\nimport pygem_input as input\n\nclass era5_variable():\n \"\"\"\n ERA5 data properties used to automatically download data\n \n Attributes\n ----------\n vn : str\n variable name\n properties : dict\n dictionary containing properties associated with the ERA5 variable\n \"\"\"\n \n def __init__(self, vn):\n \"\"\"\n Add variable name and specific properties associated with each variable.\n \"\"\"\n # Dates formatted properly as a string\n year_list = np.arange(input.era5_downloadyearstart, input.era5_downloadyearend + 1)\n year_list = [str(x) for x in year_list]\n \n # Variable name\n self.vn = vn\n \n if self.vn == 'temperature':\n self.level = 'reanalysis-era5-single-levels'\n self.properties = {\n 'variable':'2m_temperature',\n 'product_type':'reanalysis',\n 'area':input.bounding_box,\n 'year':year_list,\n 'month':['01','02','03', '04','05','06','07','08','09', '10','11','12'],\n 'day':['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16',\n '17','18','19','20','21','22','23','24','25','26','27','28','29','30','31'],\n 'time':['00:00','01:00','02:00','03:00','04:00','05:00','06:00','07:00',\n '08:00', '09:00','10:00','11:00','12:00','13:00','14:00','15:00',\n '16:00','17:00','18:00','19:00','20:00','21:00','22:00','23:00'],\n 'format':'netcdf'\n }\n self.fn = input.era5_fp + input.era5_temp_fn\n \n \n elif self.vn == 'precipitation':\n self.level = 'reanalysis-era5-single-levels'\n self.properties = {\n 'variable':'total_precipitation',\n 'product_type':'reanalysis',\n 'area':input.bounding_box,\n 'year':year_list,\n 'month':['01','02','03', '04','05','06','07','08','09', '10','11','12'],\n 'day':['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16',\n '17','18','19','20','21','22','23','24','25','26','27','28','29','30','31'],\n 'time':['00:00','01:00','02:00','03:00','04:00','05:00','06:00','07:00',\n '08:00', '09:00','10:00','11:00','12:00','13:00','14:00','15:00',\n '16:00','17:00','18:00','19:00','20:00','21:00','22:00','23:00'],\n 'format':'netcdf'\n }\n self.fn = input.era5_fp + input.era5_prec_fn\n \n \n elif self.vn == 'geopotential':\n self.level = 'reanalysis-era5-single-levels'\n self.properties = {\n 'variable':'orography',\n 'product_type':'reanalysis',\n 'area':input.bounding_box,\n 'year':'2018',\n 'month':['01','02','03', '04','05','06','07','08','09', '10','11','12'],\n 'day':['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16',\n '17','18','19','20','21','22','23','24','25','26','27','28','29','30','31'],\n 'time':['00:00','01:00','02:00','03:00','04:00','05:00','06:00','07:00',\n '08:00', '09:00','10:00','11:00','12:00','13:00','14:00','15:00',\n '16:00','17:00','18:00','19:00','20:00','21:00','22:00','23:00'],\n 'format':'netcdf'\n }\n self.fn = input.era5_fp + input.era5_elev_fn\n \n \n elif self.vn == 'temperature_pressurelevels':\n self.level = 'reanalysis-era5-pressure-levels'\n self.properties = {\n 'variable':'temperature',\n 'product_type':'reanalysis',\n 'area':input.bounding_box,\n 'pressure_level':['300','350','400','450','500','550','600','650','700','750','775','800','825',\n '850','875','900','925','950','975','1000'],\n 
'year':'2018',\n 'month':['01','02','03', '04','05','06','07','08','09', '10','11','12'],\n 'day':['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16',\n '17','18','19','20','21','22','23','24','25','26','27','28','29','30','31'],\n 'time':['00:00','01:00','02:00','03:00','04:00','05:00','06:00','07:00',\n '08:00', '09:00','10:00','11:00','12:00','13:00','14:00','15:00',\n '16:00','17:00','18:00','19:00','20:00','21:00','22:00','23:00'],\n 'format':'netcdf'\n }\n self.fn = input.era5_fp + input.era5_pressureleveltemp_fn\n \n\n#%% DOWNLOAD DATA FROM SERVER\n# Check directory to store data exists or create it\nif not os.path.isdir(input.era5_fp):\n os.makedirs(input.era5_fp)\n\n# Download data for each variable\nfor vn in input.era_varnames:\n # Create a Client instance\n c = cdsapi.Client()\n class_vn = era5_variable(vn)\n \n # Download data\n if not os.path.isfile(class_vn.fn):\n c.retrieve(class_vn.level, class_vn.properties, class_vn.fn)\n \n#%% LAPSE RATES\n## Create netcdf file for lapse rates using temperature data to fill in dimensions\n#temp = xr.open_dataset(input.eraint_fp + input.eraint_temp_fn)\n## Netcdf file for lapse rates ('w' will overwrite existing file)\n#if not os.path.isfile(input.eraint_fp + input.eraint_lr_fn):\n# netcdf_output = nc.Dataset(input.eraint_fp + input.eraint_lr_fn, 'w', format='NETCDF4')\n# # Global attributes\n# netcdf_output.description = 'Lapse rates from ERA Interim pressure level data that span the regions elevation range'\n# netcdf_output.history = 'Created ' + str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n# netcdf_output.source = 'ERA Interim reanalysis data downloaded February 2018'\n# # Dimensions\n# latitude = netcdf_output.createDimension('latitude', temp['latitude'].values.shape[0])\n# longitude = netcdf_output.createDimension('longitude', temp['longitude'].values.shape[0])\n# time = netcdf_output.createDimension('time', None)\n# # Create dates in proper format for time dimension\n# startdate = input.eraint_start_date[0:4] + '-' + input.eraint_start_date[4:6] + '-' + input.eraint_start_date[6:]\n# enddate = input.eraint_end_date[0:4] + '-' + input.eraint_end_date[4:6] + '-' + input.eraint_end_date[6:]\n# startdate = datetime(*[int(item) for item in startdate.split('-')])\n# enddate = datetime(*[int(item) for item in enddate.split('-')])\n# startdate = startdate.strftime('%Y-%m')\n# enddate = enddate.strftime('%Y-%m')\n# dates = pd.DataFrame({'date' : pd.date_range(startdate, enddate, freq='MS')})\n# dates = dates['date'].astype(datetime)\n# # Variables associated with dimensions \n# latitude = netcdf_output.createVariable('latitude', np.float32, ('latitude',))\n# latitude.long_name = 'latitude'\n# latitude.units = 'degrees_north'\n# latitude[:] = temp['latitude'].values\n# longitude = netcdf_output.createVariable('longitude', np.float32, ('longitude',))\n# longitude.long_name = 'longitude'\n# longitude.units = 'degrees_east'\n# longitude[:] = temp['longitude'].values\n# time = netcdf_output.createVariable('time', np.float64, ('time',))\n# time.long_name = \"time\"\n# time.units = \"hours since 1900-01-01 00:00:00\"\n# time.calendar = \"gregorian\"\n# time[:] = nc.date2num(dates, units=time.units, calendar=time.calendar)\n# lapserate = netcdf_output.createVariable('lapserate', np.float64, ('time', 'latitude', 'longitude'))\n# lapserate.long_name = \"lapse rate\"\n# lapserate.units = \"degC m-1\"\n# \n# # Compute lapse rates\n# # Option 1 is based on pressure level data\n# if input.option_lr_method == 1: \n# # Compute lapse 
rates from temperature pressure level data\n# data = xr.open_dataset(input.eraint_fp + input.eraint_pressureleveltemp_fn) \n# # Extract the pressure levels [Pa]\n# if data['level'].attrs['units'] == 'millibars':\n# # Convert pressure levels from millibars to Pa\n# levels = data['level'].values * 100\n# # Compute the elevation [m a.s.l] of the pressure levels using the barometric pressure formula (pressure in Pa)\n# elev = -input.R_gas*input.temp_std/(input.gravity*input.molarmass_air)*np.log(levels/input.pressure_std)\n# # Compute lapse rate\n# for lat in range(0,latitude[:].shape[0]):\n# print(latitude[lat])\n# for lon in range(0,longitude[:].shape[0]):\n# data_subset = data['t'].isel(latitude=lat, longitude=lon).values\n# lapserate[:,lat,lon] = (((elev * data_subset).mean(axis=1) - elev.mean() * data_subset.mean(axis=1)) / \n# ((elev**2).mean() - (elev.mean())**2)) \n# # Option 2 is based on surrouding pixel data\n# elif input.option_lr_method == 2: \n# # Compute lapse rates from temperature and elevation of surrouding pixels\n# # Elevation data\n# geopotential = xr.open_dataset(input.eraint_fp + input.eraint_elev_fn)\n# if ('units' in geopotential.z.attrs) and (geopotential.z.units == 'm**2 s**-2'): \n# # Convert m2 s-2 to m by dividing by gravity (ERA Interim states to use 9.80665)\n# elev = geopotential.z.values[0,:,:] / 9.80665\n# # Compute lapse rate\n# for lat in range(1,latitude[:].shape[0]-1):\n# print('latitude:',latitude[lat])\n# for lon in range(1,longitude[:].shape[0]-1):\n# elev_subset = elev[lat-1:lat+2, lon-1:lon+2]\n# temp_subset = temp.t2m[:, lat-1:lat+2, lon-1:lon+2].values\n# # time, latitude, longitude\n# lapserate[:,lat,lon] = (\n# ((elev_subset * temp_subset).mean(axis=(1,2)) - elev_subset.mean() * \n# temp_subset.mean(axis=(1,2))) / ((elev_subset**2).mean() - (elev_subset.mean())**2))\n# netcdf_output.close()\n"
]
| [
[
"numpy.arange"
]
]
|
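The commented-out lapse-rate block above evaluates, per pixel, ((elev*T).mean() - elev.mean()*T.mean()) / ((elev**2).mean() - elev.mean()**2); that is the closed-form ordinary-least-squares slope cov(elev, T)/var(elev) of temperature against elevation. A small check with synthetic numbers (not repository data):

    import numpy as np

    elev = np.array([0.0, 500.0, 1000.0, 1500.0])   # m a.s.l., synthetic
    temp = np.array([15.0, 11.8, 8.4, 5.1])         # degC, synthetic

    slope = ((elev * temp).mean() - elev.mean() * temp.mean()) / \
            ((elev**2).mean() - elev.mean()**2)
    # identical to the covariance form of the OLS slope:
    assert np.isclose(slope, np.cov(elev, temp, bias=True)[0, 1] / elev.var())
    print(slope)   # ~ -0.0066 degC per metre, i.e. about -6.6 degC/km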
ClareTT/Ticket-Checking-System | [
"8d63f4d8663388856b073c8b823b0dfed9489b37"
]
| [
"pyfile/stationnamemap.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 7 20:47:48 2018\n\n@author: Administrator\n\"\"\"\n\nimport re\nimport requests\nimport pandas as pd\n\n\nif __name__ == '__main__':\n \n url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9066'\n r = requests.get(url)\n quoets = re.findall(u'([\\u4e00-\\u9fa5]+)\\|([A-Z]+)', r.text)# Match at least one Chinese character or one upper letter\n dataframe = pd.DataFrame(quoets)\n # write data to .csv file\n dataframe.to_csv('themappingfile.csv', index = False, sep = ',', encoding = 'utf_8_sig')\n # read data from .csv file\n df = pd.read_csv('themappingfile.csv')# The type of 'df' is 'DataFrame'"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
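The regex in the script above, ([\u4e00-\u9fa5]+)\|([A-Z]+), captures (Chinese station name, uppercase station code) pairs separated by a literal '|', which is how the 12306 station_name.js payload lays its fields out. A self-contained check against a made-up sample string (not the live 12306 response):

    import re

    sample = u"@bjb|北京北|VAP|beijingbei|bjb|0@bjn|北京南|VNP|beijingnan|bjn|1"
    pairs = re.findall(u'([\u4e00-\u9fa5]+)\|([A-Z]+)', sample)
    print(pairs)   # [('北京北', 'VAP'), ('北京南', 'VNP')]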
A-Jacobson/iEEG_Seizure_Prediction | [
"bdee7f4aab72674e01af7ec254b5d6ec7f65e620"
]
| [
"predict.py"
]
| [
"from sklearn.ensemble import AdaBoostClassifier\nimport cPickle\nfrom data_utils import load_data, preds_to_df, get_skipped\nimport pandas as pd\nimport numpy as np\n\n\nwith open('Ada_1.pkl', 'rb') as f:\n Ada_1 = cPickle.load(f)\nwith open('Ada_3.pkl', 'rb') as f:\n Ada_2 = cPickle.load(f)\nwith open('Ada_3.pkl', 'rb') as f:\n Ada_3 = cPickle.load(f)\n\nX_1, files_1 = load_data('test_1', features=True)\nX_2, files_2 = load_data('test_2', features=True)\nX_3, files_3 = load_data('test_3', features=True)\n\nX_1 = np.array(list(X_1))\nX_2 = np.array(list(X_2))\nX_3 = np.array(list(X_3))\n\npreds_1 = preds_to_df(Ada_1.predict(X_1), files_1)\npreds_2 = preds_to_df(Ada_2.predict(X_2), files_2)\npreds_3 = preds_to_df(Ada_2.predict(X_3), files_3)\n\n# get preds for skipped examples\nskipped_1 = get_skipped('test_1')\nskipped_2 = get_skipped('test_2')\nskipped_3 = get_skipped('test_3')\n\nsubmit = pd.concat([preds_1, skipped_1, preds_2,\n skipped_2, preds_3, skipped_3])\n\nsubmit = submit[['File', 'Class']]\n\nsubmit.to_csv('ada100_fft_time_corr.csv',\n index=False)\n"
]
| [
[
"pandas.concat"
]
]
|
ahesford/pycwp | [
"5fabc9c1f410bf49b674bfb4427fe1f05ad251ed"
]
| [
"shell/segmentation.py"
]
| [
"#!/usr/bin/env python\n\n# Copyright (c) 2015 Andrew J. Hesford. All rights reserved.\n# Restrictions are listed in the LICENSE file distributed with this package.\n\nimport sys, os, numpy as np, getopt\nfrom pycwp import mio, segmentation, process\n\ndef usage(progname = 'segmentation.py'):\n\tbinfile = os.path.basename(progname)\n\tprint(\"Usage:\", binfile, \"[-h] [-n] [-p p] [-d scatden] [-s scatsd] [-g w,s]\")\n\tprint(\"\\t[-c chunk] <segfile> <paramfile> <sndfile> <atnfile> <denfile>\")\n\tprint()\n\tprint(\"\\t-n: Disable random variations\")\n\tprint(\"\\t-p: Use p processors (default: CPU count)\")\n\tprint(\"\\t-d: Assume a random scatterer fractional density scatden\")\n\tprint(\"\\t-s: Smooth random scatterers with Gaussian of standard deviation scatsd\")\n\tprint(\"\\t-g: Smooth tissue with Gaussian of width w and standard deviation s\")\n\tprint(\"\\t-c: Process output chunk slices at a time (default: 8)\")\n\ndef mapblks(segfile, outputs, params, start, stride, chunk, **kwargs):\n\t'''\n\tOpen the segmentation file segfile, output files in the list outputs,\n\tand loop through the segmentation in strides to produce output\n\tparameter maps. kwargs are optional arguments to be passed to the\n\tsegmentation routine.\n\n\tThe arguments start and stride refer to chunks rather than slices.\n\t'''\n\t# Open the files\n\tseg = mio.Slicer(segfile)\n\tsndfile, atnfile, denfile = [mio.Slicer(o) for o in outputs]\n\n\t# Add the chunk size to the kwargs for convenience\n\tkwargs['chunk'] = chunk\n\n\t# Loop through the chunks to process output\n\tfor n in range(start * chunk, seg.shape[-1], stride * chunk):\n\t\tprint('Processing chunk', n)\n\t\tsnd, atn, den = segmentation.maptissueblk(seg, params, n, **kwargs)\n\t\t# Figure out how many slices need to be written\n\t\toend = min(seg.shape[-1], n + snd.shape[-1])\n\t\t# Write the outputs\n\t\tsndfile[n:oend] = snd\n\t\tatnfile[n:oend] = atn\n\t\tdenfile[n:oend] = den\n\n\ndef main (argv = None):\n\tif argv is None:\n\t\targv = sys.argv[1:]\n\t\tprogname = sys.argv[0]\n\n\t# Default values\n\trandom = True\n\tnproc = process.preferred_process_count()\n\tchunk = 8\n\n\toptlist, args = getopt.getopt (argv, 'p:nd:s:c:g:h')\n\n\t# Extra arguments are added as kwargs\n\tkwargs = {}\n\n\t# Parse the options list\n\tfor opt in optlist:\n\t\tif opt[0] == '-n': random = False\n\t\telif opt[0] == '-p': nproc = int(opt[1])\n\t\telif opt[0] == '-d': kwargs['scatden'] = float(opt[1])\n\t\telif opt[0] == '-s': kwargs['scatsd'] = float(opt[1])\n\t\telif opt[0] == '-c': chunk = int(opt[1])\n\t\telif opt[0] == '-g':\n\t\t\tkstr = opt[1].split(',')\n\t\t\tkwargs['smoothp'] = [int(kstr[0]), float(kstr[1])]\n\t\telse:\n\t\t\tusage (progname)\n\t\t\treturn 128\n\n\t# The segmentation file and the parameter file must be specified\n\tif len(args) < 5:\n\t\tusage (progname)\n\t\treturn 128\n\n\t# Read the tissue parameters\n\tpmat = np.loadtxt(args[1])\n\t# Split the parameters into sound speed, attenuation and density\n\tparams = [p.tolist() for p in [pmat[:,:2], pmat[:,2:4], pmat[:,4:6]]]\n\t# Eliminate the standard deviation if random scatterers are not desired\n\tif not random: params = [[[p[0], None] for p in pv] for pv in params]\n\n\t# Grab the shape of the segmentation file and the number of slices\n\tsegfile = mio.Slicer(args[0])\n\t# The output files need to be created and truncated\n\toutputs = args[2:]\n\toutfiles = [mio.Slicer(o, segfile.shape, segfile.dtype, True) for o in outputs]\n\n\ttry:\n\t\twith process.ProcessPool() as pool:\n\t\t\tfor n 
in range(nproc):\n\t\t\t\targs = (args[0], outputs, params, n, nproc, chunk)\n\t\t\t\tpool.addtask(target=mapblks, args=args, kwargs=kwargs)\n\t\t\tpool.start()\n\t\t\tpool.wait()\n\texcept:\n\t\tfor f in outfiles: f._backer.truncate(0)\n\t\traise\n\n\treturn 0\n\nif __name__ == \"__main__\":\n\tsys.exit (main ())\n"
]
| [
[
"numpy.loadtxt"
]
]
|
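The mapblks loop above stripes work across processes: worker `start` of `stride` total visits chunk offsets start*chunk, (start+stride)*chunk, (start+2*stride)*chunk, ..., each covering `chunk` consecutive slices. A pure-Python illustration of which slice offsets each worker would touch (hypothetical sizes, no pycwp needed):

    nslices, chunk, nproc = 40, 8, 3
    for start in range(nproc):
        offsets = list(range(start * chunk, nslices, nproc * chunk))
        print("worker %d: slice offsets %s" % (start, offsets))
    # worker 0: slice offsets [0, 24]
    # worker 1: slice offsets [8, 32]
    # worker 2: slice offsets [16]

Interleaving the chunks this way keeps all workers busy even when the cost per slice varies along the volume.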
SamScholten/stereo_vision | [
"a7e3eb1decc1868d97ffcee7352dce0c08e6bfbb"
]
| [
"calibration_tools.py"
]
| [
"\"\"\"\n===============================================================================\ncalibration_tools\n- Sam Scholten 2019/04/29\n-------------------------------------------------------------------------------\n\nModule with tools for calibrating a stereo vision system. The core of the\nmodule is kept in the CalibrationSystem class, which is initialised pointing to\nimage and data directories used for reading and writing all information, as\nwell as the camera it is calibrating.\n\nThere are three secondary classes. The first is the CalibrationPlate, used to\nrepresent the physical (empircally known) calibration plate used. The Plate is\nassumed to be a grid of dots, but can be abstracted to other methods - note\nthere is no support for this in for example the correlation_tools module.\nThere is then the CalibrationImage class, which represents/holds an actual\nimage of the calibration plate at some z value from the camera(s), and a Class\ncalled the CalibrationImageSet which is a conveniency class for reading all of\nthe images to the script in sorted order.\n\nThe meat of the module is to get the pixel coordinates of the dots, then make\na polymodel, via a function in MATLAB (using the python MATLAB API) called\npolyfitn (https://au.mathworks.com/matlabcentral/fileexchange/34765-polyfitn),\nout of the pixel coordinates and the empirically known dot locations in real\nspace.\n\nRelies heavily on correlation_tools.\nRelies on stereo_camera_tools to provide information on the camera size it is\ncalibrating to.\nMATLAB API needs to be installed independently of command line/pip etc., and\nrequires a license\n - see MATLAB documentation.\n\n--------\nObjects:\n-------------------------------------------------------------------------------\nCalibrationSystem\n-------------------------------------------------------------------------------\n Methods:\n --------\n __init__(camera, img_dir, data_dir)\n add_cal_plate(cal_plate)\n add_cal_images(path_dict)\n add_dot_object(dot)\n get_sub_pixel_coords()\n make_polymodel(deg)\n pickle_polymodel(filename)\n polymodel(filename)\n\n-------------------------------------------------------------------------------\nCalibrationPlate\n-------------------------------------------------------------------------------\n Methods:\n --------\n __init__(seperation, num_down, num_right, z_vals, parent)\n get_array() - uses the attributes listed in init\n\n-------------------------------------------------------------------------------\nCalibrationImageSet\n-------------------------------------------------------------------------------\n Methods:\n --------\n __init__(path_dict)\n make_cal_img_list() - uses path_dict in init\n creates CalibrationImage(s)\n\n-------------------------------------------------------------------------------\nCalibrationImage\n-------------------------------------------------------------------------------\n Methods:\n --------\n __init__(z, left_path, right_path, camera)\n get_array() - creates TiffCorrObject from left and right paths\n\n--------\nFunctions:\n-------------------------------------------------------------------------------\n None\n-------------------------------------------------------------------------------\n\n===============================================================================\nExample\n+------------------------------------------------------------------------------\nimport matlab.engine\nimport correlation_tools as corrt\nimport calibration_tools as calbt\nimport stereo_vision_tools as camt\nfrom pathlib 
import Path\n\nimg_dir = Path.cwd()/'images'\ndata_dir = Path.cwd()/'data'\nmlab = matlab.engine.start_matlab()\n\ncal_path_dict = {1900: (\"cal_imgs/cal_image_left_1900.tiff\",\n \"cal_imgs/cal_image_right_1900.tiff\")}\n\ncamera = camt.Camera(1801, 2400, img_dir, data_dir, mlab)\n\ncal_system = calbt.CalibrationSystem(camera, img_dir, data_dir)\ncal_plate = calbt.CalibrationPlate(50, 17, 21, cal_path_dict.keys(),\n parent=cal_system)\ncal_system.add_cal_plate(cal_plate)\ncal_system.add_cal_images(cal_path_dict)\n\ndot = corrt.GaussianCorrObject(parent=cal_system)\n\ncal_system.add_dot_object(dot)\n\ncal_system.get_sub_pixel_coords()\ncal_system.make_polymodel(deg=3.0)\ncal_system.pickle_polymodel('polymodel3.p')\ncamera.add_polymodel(\"polymodel3.p\", method=\"pickled\")\n+------------------------------------------------------------------------------\n+------------------------------------------------------------------------------\n\"\"\"\n\n__author__ = \"Sam Scholten\"\n\n\nimport numpy as np # basic numerics/array ops\nimport click # beautiful command line control\nimport pickle # save/read python object to/from disk\nimport matlab.engine # use matlab to make polyfit\n\nimport correlation_tools as corrt\n\n###############################################################################\n\n\nclass CalibrationSystem(object):\n \"\"\"\n Class to create a calibration function for a stereo vision system.\n Assumes dot-finding rectangular calibration plate, although it can be made\n general.\n \"\"\"\n\n def __init__(self, camera, img_dir, data_dir):\n \"\"\"\n define the pixels on the camera, we're going to pad all input\n images up to these dimensions (actually get it from parent camera)\n \"\"\"\n self.parent = self.camera = camera\n self.img_dir = img_dir\n self.data_dir = data_dir\n\n self.cal_plate = self.dot = self.cal_img_list = self.cal_img_set = None\n self.sub_pixel_coords = self.real_coords = None\n self.polymodel = None\n\n ###########################################################\n\n def add_cal_plate(self, cal_plate):\n \"\"\"\n Add a calibration plate to the system, this gives us the real space\n coordinates of the dots\n \"\"\"\n if type(cal_plate) is not CalibrationPlate:\n raise TypeError('cal_plate needs to be of type CalibrationPlate')\n self.cal_plate = cal_plate\n self.real_coords = self.cal_plate.ar\n\n ###########################################################\n\n def add_cal_images(self, path_dict):\n \"\"\"\n Step through the dictionary of calibration images (paths) and\n add them all (in ascending z order) to the system\n \"\"\"\n self.cal_img_set = CalibrationImageSet(path_dict, self)\n self.cal_img_list = self.cal_img_set.cal_img_list\n\n ###########################################################\n\n def add_dot_object(self, dot):\n \"\"\" Add a distribution to use to find dots. I.e. could be Gaussian,\n Laplace etc.\n \"\"\"\n if not isinstance(dot, corrt.CorrObject):\n raise TypeError(\"dot object needs to be a CorrObject\")\n self.dot = dot\n\n ###########################################################\n\n def get_sub_pixel_coords(self):\n \"\"\"\n Step through all the calibration images, get all the coordinates\n of the pixel locations, compile into an array.\n \"\"\"\n\n if self.cal_img_list is None:\n raise RuntimeError(\"Couldn't find the cal_img_list... 
\" +\n \"try using add_cal_images first\")\n if self.dot is None:\n raise RuntimeError(\"No dot object found, try using add_dot_object\")\n\n if self.cal_plate is None:\n raise RuntimeError(\"couldn't find calibration plate, add it first\")\n\n coords_list = None\n # so we have the cal_img_set, step through images, correlate...\n num_d = self.cal_plate.num_d\n num_r = self.cal_plate.num_r\n with click.progressbar(self.cal_img_list,\n label='dot finding progress') as bar:\n for cal_img in bar:\n # left image\n left_sys = corrt.CorrSystem(self.img_dir, self.data_dir,\n parent=self)\n left_sys.add_template(self.dot)\n left_sys.add_search_region(cal_img.left)\n left_sys.cross_correlate(method='spectral')\n\n # highlight=True here to graphically see dot detection\n left_coords = left_sys.find_dots(num_d, num_r, highlight=False)\n\n # right image\n right_sys = corrt.CorrSystem(self.img_dir, self.data_dir,\n parent=self)\n right_sys.add_template(self.dot)\n right_sys.add_search_region(cal_img.right)\n right_sys.cross_correlate(method='spectral')\n right_coords = right_sys.find_dots(num_d, num_r,\n highlight=False)\n coords = np.concatenate((left_coords, right_coords), axis=1)\n\n if coords_list is None:\n coords_list = coords\n else:\n coords_list = np.concatenate((coords_list, coords), axis=0)\n\n self.sub_pixel_coords = np.array(coords_list)\n\n ###########################################################\n\n def make_polymodel(self, deg=3.0):\n \"\"\"\n Make a polymodel/calibration function for the system.\n deg = degree we want to take the polyfit to (needs to be a float as\n MATLAB doesn't play nice with ints)\n \"\"\"\n if self.sub_pixel_coords is None:\n raise RuntimeError(\"couldn't find sub pixel coordinates\")\n if type(deg) is not float:\n raise TypeError(\"deg needs to be a float!\")\n\n mpcoords = matlab.double(\n np.array(np.around(self.sub_pixel_coords, decimals=0),\n dtype=float\n ).tolist())\n\n # We need to convert to list first so matlab recognises it,\n # then wrap it in a method that converts to a matlab array of doubles\n # [0] here converts to 1D vector (MATLAB is weird)\n y_real = matlab.double(self.real_coords[:, 0].copy().tolist())[0]\n x_real = matlab.double(self.real_coords[:, 1].copy().tolist())[0]\n z_real = matlab.double(self.real_coords[:, 2].copy().tolist())[0]\n\n x_model = self.camera.mlab.polyfitn(mpcoords, x_real, deg)\n y_model = self.camera.mlab.polyfitn(mpcoords, y_real, deg)\n z_model = self.camera.mlab.polyfitn(mpcoords, z_real, deg)\n\n # so this is a dict of dicts\n\n # these models are dicts as well (well they're structs in matlab)\n # we save these, then pass them to polyvaln (in the matlab engine)\n # when we want to actually evaluate something\n\n # e.g. camera.mlab.polyvaln(polymodel['x'], v) where v is:\n # v = matlab.double([il, jl, ir, jr])\n\n self.polymodel = {'x': x_model, 'y': y_model,\n 'z': z_model}\n\n ###########################################################\n\n def pickle_polymodel(self, filename='polymodel.p'):\n \"\"\"\n Saves the interpolation(s) to filename, using pickle.\n This allows us to read in the calibration quickly without reading\n all of the images etc. 
which is quite time consuming\n        \"\"\"\n        if self.polymodel is None:\n            raise RuntimeError(\"couldn't find a polymodel to pickle!\")\n        path = str(self.data_dir) + '/' + filename\n        pickle.dump(self.polymodel, open(path, \"wb\"))\n\n    ###########################################################\n\n    def polymodel(self, filename=None):\n        \"\"\"\n        gets the interpolation from the system; if filename is specified,\n        it looks for a pickled interpolation file\n        \"\"\"\n        # note: on instances this method is shadowed by the self.polymodel\n        # attribute assigned in __init__, so prefer reading the attribute or\n        # unpickling directly; the return value is the polymodel dict\n        # holding the 'x', 'y' and 'z' models\n        if not filename:\n            if self.polymodel is None:\n                raise RuntimeError(\n                    \"couldn't find a non-pickled polymodel!\")\n            return self.polymodel\n        else:\n            path = str(self.data_dir) + '/' + filename\n            polymodel = pickle.load(open(path, \"rb\"))\n            return polymodel\n\n###############################################################################\n\n\nclass CalibrationPlate(object):\n    \"\"\"\n    A construction, via our empirical knowledge of the plates, of the real\n    space coordinates of the dots on the Calibration plate\n    \"\"\"\n    def __init__(self, separation, num_down, num_right, z_vals, parent=None):\n        \"\"\" Represents the real space object, the calibration plate.\n        Also defines array of real space coords. Order of dots in this\n        array is defined by sweeping through first in x, then y, then z.\n        So the top row at z1, second row at z1 ... last row at z1 then\n        onto z2 etc.\n        \"\"\"\n        self.parent = parent\n        self.sep = separation\n        self.num_d = num_down\n        self.num_r = num_right\n        self.z_vals = list(z_vals)\n        self.get_array()\n\n    ###########################################################\n\n    def get_array(self):\n        \"\"\"\n        Get an array of the real positions of the dots in the cal plate.\n        Order of array: defined by sweeping through x, then y, then z.\n        So the top row at z1, second row at z1 ... 
last row at z1 then\n onto z2 etc.\n \"\"\"\n\n real_pos = np.zeros((self.num_d*self.num_r*len(self.z_vals), 3))\n dot_counter = 0\n for z in self.z_vals:\n for y in range(0, self.num_d*self.sep, self.sep):\n for x in range(0, self.num_r*self.sep, self.sep):\n real_pos[dot_counter] = np.array([float(y),\n float(x),\n float(z)])\n dot_counter += 1\n self.ar = np.array(real_pos)\n\n###############################################################################\n\n\nclass CalibrationImageSet(object):\n \"\"\"\n Set of Calibration Images, allows us to specify the cal images\n as a dictionary and read them all in nicely and elegantly\n path_dict looks like: {z: (left_path, right_path), ...}\n \"\"\"\n def __init__(self, path_dict, parent=None):\n self.parent = parent\n self.path_dict = path_dict\n\n self.make_cal_img_list()\n\n ###########################################################\n\n def make_cal_img_list(self):\n \"\"\"\n sort the images by z value, return a list of CalImage objects\n \"\"\"\n sorted_z = sorted(self.path_dict, key=self.path_dict.get)\n self.cal_img_list = []\n for z in sorted_z:\n left, right = self.path_dict[z]\n self.cal_img_list.append(\n CalibrationImage(z, str(self.parent.img_dir) + '/' + left,\n str(self.parent.img_dir) + '/' + right,\n self.parent.camera))\n\n\n###############################################################################\n\n\nclass CalibrationImage(object):\n \"\"\"\n Calibration image containing a z value and paths to a left and\n right image\n \"\"\"\n def __init__(self, z, left_path, right_path, camera):\n self.z = z\n self.left_path = left_path\n self.right_path = right_path\n self.camera = camera\n self.get_arrays()\n\n ###########################################################\n\n def get_arrays(self):\n self.left = corrt.TiffCorrObject(self.left_path, parent=self)\n self.right = corrt.TiffCorrObject(self.right_path, parent=self)\n\n camera_shape = self.camera.shape\n self.left.ar = corrt.pad_to_shape(self.left.ar, camera_shape)\n self.right.ar = corrt.pad_to_shape(self.right.ar, camera_shape)\n\n###############################################################################\n"
]
| [
[
"numpy.concatenate",
"numpy.around",
"numpy.array"
]
]
|
HsunGong/Augmented-Advertisement | [
"ae9d0f5796c13e837a1a547d888647aeb61f0b04"
]
| [
"semseg/scripts/eval_multipro.py"
]
| [
"# System libs\nimport os\nimport argparse\nfrom distutils.version import LooseVersion\nfrom multiprocessing import Queue, Process\n# Numerical libs\nimport numpy as np\nimport math\nimport torch\nimport torch.nn as nn\nfrom scipy.io import loadmat\n# Our libs\nfrom config import cfg\nfrom dataset import ValDataset\nfrom models import ModelBuilder, SegmentationModule\nfrom utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices, setup_logger\nfrom lib.nn import user_scattered_collate, async_copy_to\nfrom lib.utils import as_numpy\nfrom PIL import Image\nfrom tqdm import tqdm\n\ncolors = loadmat('data/ADEChallengeData2016/color150.mat')['colors']\n\n\ndef visualize_result(data, pred, dir_result):\n (img, seg, info) = data\n\n # segmentation\n seg_color = colorEncode(seg, colors)\n\n # prediction\n pred_color = colorEncode(pred, colors)\n\n # aggregate images and save\n im_vis = np.concatenate((img, seg_color, pred_color),\n axis=1).astype(np.uint8)\n\n img_name = info.split('/')[-1]\n Image.fromarray(im_vis).save(os.path.join(dir_result, img_name.replace('.jpg', '.png')))\n\n\ndef evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):\n segmentation_module.eval()\n\n for batch_data in loader:\n # process data\n batch_data = batch_data[0]\n seg_label = as_numpy(batch_data['seg_label'][0])\n img_resized_list = batch_data['img_data']\n\n with torch.no_grad():\n segSize = (seg_label.shape[0], seg_label.shape[1])\n scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])\n scores = async_copy_to(scores, gpu_id)\n\n for img in img_resized_list:\n feed_dict = batch_data.copy()\n feed_dict['img_data'] = img\n del feed_dict['img_ori']\n del feed_dict['info']\n feed_dict = async_copy_to(feed_dict, gpu_id)\n\n # forward pass\n scores_tmp = segmentation_module(feed_dict, segSize=segSize)\n scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)\n\n _, pred = torch.max(scores, dim=1)\n pred = as_numpy(pred.squeeze(0).cpu())\n\n # calculate accuracy and SEND THEM TO MASTER\n acc, pix = accuracy(pred, seg_label)\n intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)\n result_queue.put_nowait((acc, pix, intersection, union))\n\n # visualization\n if cfg.VAL.visualize:\n visualize_result(\n (batch_data['img_ori'], seg_label, batch_data['info']),\n pred,\n os.path.join(cfg.DIR, 'result')\n )\n\n\ndef worker(cfg, gpu_id, start_idx, end_idx, result_queue):\n torch.cuda.set_device(gpu_id)\n\n # Dataset and Loader\n dataset_val = ValDataset(\n cfg.DATASET.root_dataset,\n cfg.DATASET.list_val,\n cfg.DATASET,\n start_idx=start_idx, end_idx=end_idx)\n loader_val = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=cfg.VAL.batch_size,\n shuffle=False,\n collate_fn=user_scattered_collate,\n num_workers=2)\n\n # Network Builders\n net_encoder = ModelBuilder.build_encoder(\n arch=cfg.MODEL.arch_encoder.lower(),\n fc_dim=cfg.MODEL.fc_dim,\n weights=cfg.MODEL.weights_encoder)\n net_decoder = ModelBuilder.build_decoder(\n arch=cfg.MODEL.arch_decoder.lower(),\n fc_dim=cfg.MODEL.fc_dim,\n num_class=cfg.DATASET.num_class,\n weights=cfg.MODEL.weights_decoder,\n use_softmax=True)\n\n crit = nn.NLLLoss(ignore_index=-1)\n\n segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)\n\n segmentation_module.cuda()\n\n # Main loop\n evaluate(segmentation_module, loader_val, cfg, gpu_id, result_queue)\n\n\ndef main(cfg, gpus):\n with open(cfg.DATASET.list_val, 'r') as f:\n lines = f.readlines()\n num_files = len(lines)\n\n 
num_files_per_gpu = math.ceil(num_files / len(gpus))\n\n pbar = tqdm(total=num_files)\n\n acc_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n result_queue = Queue(500)\n procs = []\n for idx, gpu_id in enumerate(gpus):\n start_idx = idx * num_files_per_gpu\n end_idx = min(start_idx + num_files_per_gpu, num_files)\n proc = Process(target=worker, args=(cfg, gpu_id, start_idx, end_idx, result_queue))\n print('gpu:{}, start_idx:{}, end_idx:{}'.format(gpu_id, start_idx, end_idx))\n proc.start()\n procs.append(proc)\n\n # master fetches results\n processed_counter = 0\n while processed_counter < num_files:\n if result_queue.empty():\n continue\n (acc, pix, intersection, union) = result_queue.get()\n acc_meter.update(acc, pix)\n intersection_meter.update(intersection)\n union_meter.update(union)\n processed_counter += 1\n pbar.update(1)\n\n for p in procs:\n p.join()\n\n # summary\n iou = intersection_meter.sum / (union_meter.sum + 1e-10)\n for i, _iou in enumerate(iou):\n print('class [{}], IoU: {:.4f}'.format(i, _iou))\n\n print('[Eval Summary]:')\n print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'\n .format(iou.mean(), acc_meter.average()*100))\n\n print('Evaluation Done!')\n\n\nif __name__ == '__main__':\n assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \\\n 'PyTorch>=0.4.0 is required'\n\n parser = argparse.ArgumentParser(\n description=\"PyTorch Semantic Segmentation Validation\"\n )\n parser.add_argument(\n \"--cfg\",\n default=\"config/ade20k-resnet50dilated-ppm_deepsup.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\n \"--gpus\",\n default=\"0-3\",\n help=\"gpus to use, e.g. 0-3 or 0,1,2,3\"\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n args = parser.parse_args()\n\n cfg.merge_from_file(args.cfg)\n cfg.merge_from_list(args.opts)\n # cfg.freeze()\n\n logger = setup_logger(distributed_rank=0) # TODO\n logger.info(\"Loaded configuration file {}\".format(args.cfg))\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n # absolute paths of model weights\n cfg.MODEL.weights_encoder = os.path.join(\n cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)\n cfg.MODEL.weights_decoder = os.path.join(\n cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)\n assert os.path.exists(cfg.MODEL.weights_encoder) and \\\n os.path.exists(cfg.MODEL.weights_decoder), \"checkpoint does not exitst!\"\n\n if not os.path.isdir(os.path.join(cfg.DIR, \"result\")):\n os.makedirs(os.path.join(cfg.DIR, \"result\"))\n\n # Parse gpu ids\n gpus = parse_devices(args.gpus)\n gpus = [x.replace('gpu', '') for x in gpus]\n gpus = [int(x) for x in gpus]\n\n main(cfg, gpus)\n"
]
| [
[
"torch.nn.NLLLoss",
"numpy.concatenate",
"torch.zeros",
"torch.max",
"torch.no_grad",
"scipy.io.loadmat",
"torch.cuda.set_device",
"torch.utils.data.DataLoader"
]
]
|
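One detail in the master loop above: multiprocessing.Queue.get() already blocks until an item arrives, so the `if result_queue.empty(): continue` guard only adds a busy-wait (and empty() is documented as unreliable for synchronization). A minimal sketch of the same master/worker accounting with plain blocking gets, using a synthetic worker rather than the segmentation code:

    from multiprocessing import Process, Queue

    def worker(q, n):
        for i in range(n):
            q.put((i, i * i))       # stand-in for the (acc, pix, ...) result tuples

    if __name__ == '__main__':
        q = Queue()
        p = Process(target=worker, args=(q, 5))
        p.start()
        for _ in range(5):          # one blocking get per expected result
            print(q.get())
        p.join()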
arita37/GP-VAE | [
"cfc90858989425b5787c2063615f9b67f63cc229"
]
| [
"train.py"
]
| [
"\"\"\"\n\nScript to train the proposed GP-VAE model.\n\n\"\"\"\n\nimport sys\nimport os\nimport time\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\n\ntf.compat.v1.enable_eager_execution()\n\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom absl import app\nfrom absl import flags\n\nsys.path.append(\"..\")\nfrom lib.models import *\n\n\nFLAGS = flags.FLAGS\n\n# HMNIST config\n# flags.DEFINE_integer('latent_dim', 256, 'Dimensionality of the latent space')\n# flags.DEFINE_list('encoder_sizes', [256, 256], 'Layer sizes of the encoder')\n# flags.DEFINE_list('decoder_sizes', [256, 256, 256], 'Layer sizes of the decoder')\n# flags.DEFINE_integer('window_size', 3, 'Window size for the inference CNN: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('sigma', 1.0, 'Sigma value for the GP prior: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('length_scale', 2.0, 'Length scale value for the GP prior: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('beta', 0.8, 'Factor to weigh the KL term (similar to beta-VAE)')\n# flags.DEFINE_integer('num_epochs', 20, 'Number of training epochs')\n\n# SPRITES config GP-VAE\n# flags.DEFINE_integer('latent_dim', 256, 'Dimensionality of the latent space')\n# flags.DEFINE_list('encoder_sizes', [32, 256, 256], 'Layer sizes of the encoder')\n# flags.DEFINE_list('decoder_sizes', [256, 256, 256], 'Layer sizes of the decoder')\n# flags.DEFINE_integer('window_size', 3, 'Window size for the inference CNN: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('sigma', 1.0, 'Sigma value for the GP prior: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('length_scale', 2.0, 'Length scale value for the GP prior: Ignored if model_type is not gp-vae')\n# flags.DEFINE_float('beta', 0.1, 'Factor to weigh the KL term (similar to beta-VAE)')\n# flags.DEFINE_integer('num_epochs', 20, 'Number of training epochs')\n\n# Physionet config\nflags.DEFINE_integer('latent_dim', 35, 'Dimensionality of the latent space')\nflags.DEFINE_list('encoder_sizes', [128, 128], 'Layer sizes of the encoder')\nflags.DEFINE_list('decoder_sizes', [256, 256], 'Layer sizes of the decoder')\nflags.DEFINE_integer('window_size', 24, 'Window size for the inference CNN: Ignored if model_type is not gp-vae')\nflags.DEFINE_float('sigma', 1.005, 'Sigma value for the GP prior: Ignored if model_type is not gp-vae')\nflags.DEFINE_float('length_scale', 7.0, 'Length scale value for the GP prior: Ignored if model_type is not gp-vae')\nflags.DEFINE_float('beta', 0.2, 'Factor to weigh the KL term (similar to beta-VAE)')\nflags.DEFINE_integer('num_epochs', 40, 'Number of training epochs')\n\n# Flags with common default values for all three datasets\nflags.DEFINE_float('learning_rate', 1e-3, 'Learning rate for training')\nflags.DEFINE_float('gradient_clip', 1e4, 'Maximum global gradient norm for the gradient clipping during training')\nflags.DEFINE_integer('num_steps', 0, 'Number of training steps: If non-zero it overwrites num_epochs')\nflags.DEFINE_integer('print_interval', 0, 'Interval for printing the loss and saving the model during training')\nflags.DEFINE_string('exp_name', \"debug\", 'Name of the experiment')\nflags.DEFINE_string('basedir', \"models\", 'Directory where the models should be 
stored')\nflags.DEFINE_string('data_dir', \"\", 'Directory from where the data should be read in')\nflags.DEFINE_enum('data_type', 'hmnist', ['hmnist', 'physionet', 'sprites'], 'Type of data to be trained on')\nflags.DEFINE_integer('seed', 1337, 'Seed for the random number generator')\nflags.DEFINE_enum('model_type', 'gp-vae', ['vae', 'hi-vae', 'gp-vae'], 'Type of model to be trained')\nflags.DEFINE_integer('cnn_kernel_size', 3, 'Kernel size for the CNN preprocessor')\nflags.DEFINE_list('cnn_sizes', [256], 'Number of filters for the layers of the CNN preprocessor')\nflags.DEFINE_boolean('testing', False, 'Use the actual test set for testing')\nflags.DEFINE_boolean('banded_covar', False, 'Use a banded covariance matrix instead of a diagonal one for the output of the inference network: Ignored if model_type is not gp-vae')\nflags.DEFINE_integer('batch_size', 64, 'Batch size for training')\n\nflags.DEFINE_integer('M', 1, 'Number of samples for ELBO estimation')\nflags.DEFINE_integer('K', 1, 'Number of importance sampling weights')\n\nflags.DEFINE_enum('kernel', 'cauchy', ['rbf', 'diffusion', 'matern', 'cauchy'], 'Kernel to be used for the GP prior: Ignored if model_type is not (m)gp-vae')\nflags.DEFINE_integer('kernel_scales', 1, 'Number of different length scales sigma for the GP prior: Ignored if model_type is not gp-vae')\n\n\ndef main(argv):\n del argv # unused\n np.random.seed(FLAGS.seed)\n tf.compat.v1.set_random_seed(FLAGS.seed)\n\n print(\"Testing: \", FLAGS.testing, f\"\\t Seed: {FLAGS.seed}\")\n\n FLAGS.encoder_sizes = [int(size) for size in FLAGS.encoder_sizes]\n FLAGS.decoder_sizes = [int(size) for size in FLAGS.decoder_sizes]\n\n if 0 in FLAGS.encoder_sizes:\n FLAGS.encoder_sizes.remove(0)\n if 0 in FLAGS.decoder_sizes:\n FLAGS.decoder_sizes.remove(0)\n\n # Make up full exp name\n timestamp = datetime.now().strftime(\"%y%m%d\")\n full_exp_name = \"{}_{}\".format(timestamp, FLAGS.exp_name)\n outdir = os.path.join(FLAGS.basedir, full_exp_name)\n if not os.path.exists(outdir): os.mkdir(outdir)\n checkpoint_prefix = os.path.join(outdir, \"ckpt\")\n print(\"Full exp name: \", full_exp_name)\n\n\n ###################################\n # Define data specific parameters #\n ###################################\n\n if FLAGS.data_type == \"hmnist\":\n FLAGS.data_dir = \"data/hmnist/hmnist_mnar.npz\"\n data_dim = 784\n time_length = 10\n num_classes = 10\n decoder = BernoulliDecoder\n img_shape = (28, 28, 1)\n val_split = 50000\n elif FLAGS.data_type == \"physionet\":\n if FLAGS.data_dir == \"\":\n FLAGS.data_dir = \"data/physionet/physionet.npz\"\n data_dim = 35\n time_length = 48\n num_classes = 2\n\n decoder = GaussianDecoder\n elif FLAGS.data_type == \"sprites\":\n if FLAGS.data_dir == \"\":\n FLAGS.data_dir = \"data/sprites/sprites.npz\"\n data_dim = 12288\n time_length = 8\n decoder = GaussianDecoder\n img_shape = (64, 64, 3)\n val_split = 8000\n else:\n raise ValueError(\"Data type must be one of ['hmnist', 'physionet', 'sprites']\")\n\n\n #############\n # Load data #\n #############\n\n data = np.load(FLAGS.data_dir)\n x_train_full = data['x_train_full']\n x_train_miss = data['x_train_miss']\n m_train_miss = data['m_train_miss']\n if FLAGS.data_type in ['hmnist', 'physionet']:\n y_train = data['y_train']\n\n if FLAGS.testing:\n if FLAGS.data_type in ['hmnist', 'sprites']:\n x_val_full = data['x_test_full']\n x_val_miss = data['x_test_miss']\n m_val_miss = data['m_test_miss']\n if FLAGS.data_type == 'hmnist':\n y_val = data['y_test']\n elif FLAGS.data_type == 'physionet':\n 
x_val_full = data['x_train_full']\n x_val_miss = data['x_train_miss']\n m_val_miss = data['m_train_miss']\n y_val = data['y_train']\n m_val_artificial = data[\"m_train_artificial\"]\n elif FLAGS.data_type in ['hmnist', 'sprites']:\n x_val_full = x_train_full[val_split:]\n x_val_miss = x_train_miss[val_split:]\n m_val_miss = m_train_miss[val_split:]\n if FLAGS.data_type == 'hmnist':\n y_val = y_train[val_split:]\n x_train_full = x_train_full[:val_split]\n x_train_miss = x_train_miss[:val_split]\n m_train_miss = m_train_miss[:val_split]\n y_train = y_train[:val_split]\n elif FLAGS.data_type == 'physionet':\n x_val_full = data[\"x_val_full\"] # full for artificial missings\n x_val_miss = data[\"x_val_miss\"]\n m_val_miss = data[\"m_val_miss\"]\n m_val_artificial = data[\"m_val_artificial\"]\n y_val = data[\"y_val\"]\n else:\n raise ValueError(\"Data type must be one of ['hmnist', 'physionet', 'sprites']\")\n\n tf_x_train_miss = tf.data.Dataset.from_tensor_slices((x_train_miss, m_train_miss))\\\n .shuffle(len(x_train_miss)).batch(FLAGS.batch_size).repeat()\n tf_x_val_miss = tf.data.Dataset.from_tensor_slices((x_val_miss, m_val_miss)).batch(FLAGS.batch_size).repeat()\n tf_x_val_miss = tf.compat.v1.data.make_one_shot_iterator(tf_x_val_miss)\n\n # Build Conv2D preprocessor for image data\n if FLAGS.data_type in ['hmnist', 'sprites']:\n print(\"Using CNN preprocessor\")\n image_preprocessor = ImagePreprocessor(img_shape, FLAGS.cnn_sizes, FLAGS.cnn_kernel_size)\n elif FLAGS.data_type == 'physionet':\n image_preprocessor = None\n else:\n raise ValueError(\"Data type must be one of ['hmnist', 'physionet', 'sprites']\")\n\n\n ###############\n # Build model #\n ###############\n\n if FLAGS.model_type == \"vae\":\n model = VAE(latent_dim=FLAGS.latent_dim, data_dim=data_dim, time_length=time_length,\n encoder_sizes=FLAGS.encoder_sizes, encoder=DiagonalEncoder,\n decoder_sizes=FLAGS.decoder_sizes, decoder=decoder,\n image_preprocessor=image_preprocessor, window_size=FLAGS.window_size,\n beta=FLAGS.beta, M=FLAGS.M, K=FLAGS.K)\n elif FLAGS.model_type == \"hi-vae\":\n model = HI_VAE(latent_dim=FLAGS.latent_dim, data_dim=data_dim, time_length=time_length,\n encoder_sizes=FLAGS.encoder_sizes, encoder=DiagonalEncoder,\n decoder_sizes=FLAGS.decoder_sizes, decoder=decoder,\n image_preprocessor=image_preprocessor, window_size=FLAGS.window_size,\n beta=FLAGS.beta, M=FLAGS.M, K=FLAGS.K)\n elif FLAGS.model_type == \"gp-vae\":\n encoder = BandedJointEncoder if FLAGS.banded_covar else JointEncoder\n model = GP_VAE(latent_dim=FLAGS.latent_dim, data_dim=data_dim, time_length=time_length,\n encoder_sizes=FLAGS.encoder_sizes, encoder=encoder,\n decoder_sizes=FLAGS.decoder_sizes, decoder=decoder,\n kernel=FLAGS.kernel, sigma=FLAGS.sigma,\n length_scale=FLAGS.length_scale, kernel_scales = FLAGS.kernel_scales,\n image_preprocessor=image_preprocessor, window_size=FLAGS.window_size,\n beta=FLAGS.beta, M=FLAGS.M, K=FLAGS.K)\n else:\n raise ValueError(\"Model type must be one of ['vae', 'hi-vae', 'gp-vae']\")\n\n\n ########################\n # Training preparation #\n ########################\n\n print(\"GPU support: \", tf.test.is_gpu_available())\n\n print(\"Training...\")\n _ = tf.compat.v1.train.get_or_create_global_step()\n trainable_vars = model.get_trainable_vars()\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n\n print(\"Encoder: \", model.encoder.net.summary())\n print(\"Decoder: \", model.decoder.net.summary())\n\n if model.preprocessor is not None:\n print(\"Preprocessor: \", 
model.preprocessor.net.summary())\n saver = tf.compat.v1.train.Checkpoint(optimizer=optimizer, encoder=model.encoder.net,\n decoder=model.decoder.net, preprocessor=model.preprocessor.net,\n optimizer_step=tf.compat.v1.train.get_or_create_global_step())\n else:\n saver = tf.compat.v1.train.Checkpoint(optimizer=optimizer, encoder=model.encoder.net, decoder=model.decoder.net,\n optimizer_step=tf.compat.v1.train.get_or_create_global_step())\n\n summary_writer = tf.contrib.summary.create_file_writer(outdir, flush_millis=10000)\n\n if FLAGS.num_steps == 0:\n num_steps = FLAGS.num_epochs * len(x_train_miss) // FLAGS.batch_size\n else:\n num_steps = FLAGS.num_steps\n\n if FLAGS.print_interval == 0:\n FLAGS.print_interval = num_steps // FLAGS.num_epochs\n\n\n ############\n # Training #\n ############\n\n losses_train = []\n losses_val = []\n\n t0 = time.time()\n with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():\n for i, (x_seq, m_seq) in enumerate(tf_x_train_miss.take(num_steps)):\n try:\n with tf.GradientTape() as tape:\n tape.watch(trainable_vars)\n loss = model.compute_loss(x_seq, m_mask=m_seq)\n losses_train.append(loss.numpy())\n grads = tape.gradient(loss, trainable_vars)\n grads = [np.nan_to_num(grad) for grad in grads]\n grads, global_norm = tf.clip_by_global_norm(grads, FLAGS.gradient_clip)\n optimizer.apply_gradients(zip(grads, trainable_vars),\n global_step=tf.compat.v1.train.get_or_create_global_step())\n\n # Print intermediate results\n if i % FLAGS.print_interval == 0:\n print(\"================================================\")\n print(\"Learning rate: {} | Global gradient norm: {:.2f}\".format(optimizer._lr, global_norm))\n print(\"Step {}) Time = {:2f}\".format(i, time.time() - t0))\n loss, nll, kl = model.compute_loss(x_seq, m_mask=m_seq, return_parts=True)\n print(\"Train loss = {:.3f} | NLL = {:.3f} | KL = {:.3f}\".format(loss, nll, kl))\n\n saver.save(checkpoint_prefix)\n tf.contrib.summary.scalar(\"loss_train\", loss)\n tf.contrib.summary.scalar(\"kl_train\", kl)\n tf.contrib.summary.scalar(\"nll_train\", nll)\n\n # Validation loss\n x_val_batch, m_val_batch = tf_x_val_miss.get_next()\n val_loss, val_nll, val_kl = model.compute_loss(x_val_batch, m_mask=m_val_batch, return_parts=True)\n losses_val.append(val_loss.numpy())\n print(\"Validation loss = {:.3f} | NLL = {:.3f} | KL = {:.3f}\".format(val_loss, val_nll, val_kl))\n\n tf.contrib.summary.scalar(\"loss_val\", val_loss)\n tf.contrib.summary.scalar(\"kl_val\", val_kl)\n tf.contrib.summary.scalar(\"nll_val\", val_nll)\n\n if FLAGS.data_type in [\"hmnist\", \"sprites\"]:\n # Draw reconstructed images\n x_hat = model.decode(model.encode(x_seq).sample()).mean()\n tf.contrib.summary.image(\"input_train\", tf.reshape(x_seq, [-1]+list(img_shape)))\n tf.contrib.summary.image(\"reconstruction_train\", tf.reshape(x_hat, [-1]+list(img_shape)))\n elif FLAGS.data_type == 'physionet':\n # Eval MSE and AUROC on entire val set\n x_val_miss_batches = np.array_split(x_val_miss, FLAGS.batch_size, axis=0)\n x_val_full_batches = np.array_split(x_val_full, FLAGS.batch_size, axis=0)\n m_val_artificial_batches = np.array_split(m_val_artificial, FLAGS.batch_size, axis=0)\n get_val_batches = lambda: zip(x_val_miss_batches, x_val_full_batches, m_val_artificial_batches)\n\n n_missings = m_val_artificial.sum()\n mse_miss = np.sum([model.compute_mse(x, y=y, m_mask=m).numpy()\n for x, y, m in get_val_batches()]) / n_missings\n\n x_val_imputed = np.vstack([model.decode(model.encode(x_batch).mean()).mean().numpy()\n for 
x_batch in x_val_miss_batches])\n x_val_imputed[m_val_miss == 0] = x_val_miss[m_val_miss == 0] # impute gt observed values\n\n x_val_imputed = x_val_imputed.reshape([-1, time_length * data_dim])\n val_split = len(x_val_imputed) // 2\n cls_model = LogisticRegression(solver='liblinear', tol=1e-10, max_iter=10000)\n cls_model.fit(x_val_imputed[:val_split], y_val[:val_split])\n probs = cls_model.predict_proba(x_val_imputed[val_split:])[:, 1]\n auroc = roc_auc_score(y_val[val_split:], probs)\n print(\"MSE miss: {:.4f} | AUROC: {:.4f}\".format(mse_miss, auroc))\n\n # Update learning rate (used only for physionet with decay=0.5)\n if i > 0 and i % (10*FLAGS.print_interval) == 0:\n optimizer._lr = max(0.5 * optimizer._lr, 0.1 * FLAGS.learning_rate)\n t0 = time.time()\n except KeyboardInterrupt:\n saver.save(checkpoint_prefix)\n if FLAGS.debug:\n import ipdb\n ipdb.set_trace()\n break\n\n\n ##############\n # Evaluation #\n ##############\n\n print(\"Evaluation...\")\n\n # Split data on batches\n x_val_miss_batches = np.array_split(x_val_miss, FLAGS.batch_size, axis=0)\n x_val_full_batches = np.array_split(x_val_full, FLAGS.batch_size, axis=0)\n if FLAGS.data_type == 'physionet':\n m_val_batches = np.array_split(m_val_artificial, FLAGS.batch_size, axis=0)\n else:\n m_val_batches = np.array_split(m_val_miss, FLAGS.batch_size, axis=0)\n get_val_batches = lambda: zip(x_val_miss_batches, x_val_full_batches, m_val_batches)\n\n # Compute NLL and MSE on missing values\n n_missings = m_val_artificial.sum() if FLAGS.data_type == 'physionet' else m_val_miss.sum()\n nll_miss = np.sum([model.compute_nll(x, y=y, m_mask=m).numpy()\n for x, y, m in get_val_batches()]) / n_missings\n mse_miss = np.sum([model.compute_mse(x, y=y, m_mask=m, binary=FLAGS.data_type==\"hmnist\").numpy()\n for x, y, m in get_val_batches()]) / n_missings\n print(\"NLL miss: {:.4f}\".format(nll_miss))\n print(\"MSE miss: {:.4f}\".format(mse_miss))\n\n # Save imputed values\n z_mean = [model.encode(x_batch).mean().numpy() for x_batch in x_val_miss_batches]\n np.save(os.path.join(outdir, \"z_mean\"), np.vstack(z_mean))\n x_val_imputed = np.vstack([model.decode(z_batch).mean().numpy() for z_batch in z_mean])\n np.save(os.path.join(outdir, \"imputed_no_gt\"), x_val_imputed)\n\n # impute gt observed values\n x_val_imputed[m_val_miss == 0] = x_val_miss[m_val_miss == 0]\n np.save(os.path.join(outdir, \"imputed\"), x_val_imputed)\n\n if FLAGS.data_type == \"hmnist\":\n # AUROC evaluation using Logistic Regression\n x_val_imputed = np.round(x_val_imputed)\n x_val_imputed = x_val_imputed.reshape([-1, time_length * data_dim])\n\n cls_model = LogisticRegression(solver='lbfgs', multi_class='multinomial', tol=1e-10, max_iter=10000)\n val_split = len(x_val_imputed) // 2\n\n cls_model.fit(x_val_imputed[:val_split], y_val[:val_split])\n probs = cls_model.predict_proba(x_val_imputed[val_split:])\n\n auprc = average_precision_score(np.eye(num_classes)[y_val[val_split:]], probs)\n auroc = roc_auc_score(np.eye(num_classes)[y_val[val_split:]], probs)\n print(\"AUROC: {:.4f}\".format(auroc))\n print(\"AUPRC: {:.4f}\".format(auprc))\n\n elif FLAGS.data_type == \"sprites\":\n auroc, auprc = 0, 0\n\n elif FLAGS.data_type == \"physionet\":\n # Uncomment to preserve some z_samples and their reconstructions\n # for i in range(5):\n # z_sample = [model.encode(x_batch).sample().numpy() for x_batch in x_val_miss_batches]\n # np.save(os.path.join(outdir, \"z_sample_{}\".format(i)), np.vstack(z_sample))\n # x_val_imputed_sample = 
np.vstack([model.decode(z_batch).mean().numpy() for z_batch in z_sample])\n        #     np.save(os.path.join(outdir, \"imputed_sample_{}_no_gt\".format(i)), x_val_imputed_sample)\n        #     x_val_imputed_sample[m_val_miss == 0] = x_val_miss[m_val_miss == 0]\n        #     np.save(os.path.join(outdir, \"imputed_sample_{}\".format(i)), x_val_imputed_sample)\n\n        # AUROC evaluation using Logistic Regression\n        x_val_imputed = x_val_imputed.reshape([-1, time_length * data_dim])\n        val_split = len(x_val_imputed) // 2\n        cls_model = LogisticRegression(solver='liblinear', tol=1e-10, max_iter=10000)\n        cls_model.fit(x_val_imputed[:val_split], y_val[:val_split])\n        probs = cls_model.predict_proba(x_val_imputed[val_split:])[:, 1]\n        auprc = average_precision_score(y_val[val_split:], probs)\n        auroc = roc_auc_score(y_val[val_split:], probs)\n\n        print(\"AUROC: {:.4f}\".format(auroc))\n        print(\"AUPRC: {:.4f}\".format(auprc))\n\n    # Visualize reconstructions\n    if FLAGS.data_type in [\"hmnist\", \"sprites\"]:\n        img_index = 0\n        if FLAGS.data_type == \"hmnist\":\n            img_shape = (28, 28)\n            cmap = \"gray\"\n        elif FLAGS.data_type == \"sprites\":\n            img_shape = (64, 64, 3)\n            cmap = None\n\n        fig, axes = plt.subplots(nrows=3, ncols=x_val_miss.shape[1], figsize=(2*x_val_miss.shape[1], 6))\n\n        x_hat = model.decode(model.encode(x_val_miss[img_index: img_index+1]).mean()).mean().numpy()\n        seqs = [x_val_miss[img_index:img_index+1], x_hat, x_val_full[img_index:img_index+1]]\n\n        for axs, seq in zip(axes, seqs):\n            for ax, img in zip(axs, seq[0]):\n                ax.imshow(img.reshape(img_shape), cmap=cmap)\n                ax.axis('off')\n\n        suptitle = FLAGS.model_type + f\" reconstruction, MSE missing = {mse_miss}\"\n        fig.suptitle(suptitle, size=18)\n        fig.savefig(os.path.join(outdir, FLAGS.data_type + \"_reconstruction.pdf\"))\n\n    results_all = [FLAGS.seed, FLAGS.model_type, FLAGS.data_type, FLAGS.kernel, FLAGS.beta, FLAGS.latent_dim,\n                   FLAGS.num_epochs, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.window_size,\n                   FLAGS.kernel_scales, FLAGS.sigma, FLAGS.length_scale,\n                   len(FLAGS.encoder_sizes), FLAGS.encoder_sizes[0] if len(FLAGS.encoder_sizes) > 0 else 0,\n                   len(FLAGS.decoder_sizes), FLAGS.decoder_sizes[0] if len(FLAGS.decoder_sizes) > 0 else 0,\n                   FLAGS.cnn_kernel_size, FLAGS.cnn_sizes,\n                   nll_miss, mse_miss, losses_train[-1], losses_val[-1], auprc, auroc, FLAGS.testing, FLAGS.data_dir]\n\n    with open(os.path.join(outdir, \"results.tsv\"), \"w\") as outfile:\n        outfile.write(\"seed\\tmodel\\tdata\\tkernel\\tbeta\\tz_size\\tnum_epochs\"\n                      \"\\tbatch_size\\tlearning_rate\\twindow_size\\tkernel_scales\\t\"\n                      \"sigma\\tlength_scale\\tencoder_depth\\tencoder_width\\t\"\n                      \"decoder_depth\\tdecoder_width\\tcnn_kernel_size\\t\"\n                      \"cnn_sizes\\tNLL\\tMSE\\tlast_train_loss\\tlast_val_loss\\tAUPRC\\tAUROC\\ttesting\\tdata_dir\\n\")\n        outfile.write(\"\\t\".join(map(str, results_all)))\n\n    with open(os.path.join(outdir, \"training_curve.tsv\"), \"w\") as outfile:\n        outfile.write(\"\\t\".join(map(str, losses_train)))\n        outfile.write(\"\\n\")\n        outfile.write(\"\\t\".join(map(str, losses_val)))\n\n    print(\"Training finished.\")\n\n\nif __name__ == '__main__':\n    app.run(main)\n"
]
| [
[
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.load",
"sklearn.metrics.average_precision_score",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"numpy.nan_to_num",
"tensorflow.GradientTape",
"tensorflow.compat.v1.train.AdamOptimizer",
"matplotlib.pyplot.subplots",
"numpy.eye",
"tensorflow.compat.v1.train.get_or_create_global_step",
"numpy.vstack",
"matplotlib.use",
"numpy.round",
"tensorflow.contrib.summary.scalar",
"tensorflow.contrib.summary.create_file_writer",
"tensorflow.clip_by_global_norm",
"sklearn.metrics.roc_auc_score",
"numpy.array_split",
"numpy.random.seed",
"tensorflow.test.is_gpu_available",
"tensorflow.contrib.summary.always_record_summaries",
"sklearn.linear_model.LogisticRegression",
"tensorflow.compat.v1.enable_eager_execution"
]
]
|
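The Physionet branch of the training script above scores imputation quality with a linear probe: imputed sequences are flattened, a logistic regression is fit on the first half, and AUROC is computed on the held-out second half. Below is a minimal, self-contained sketch of that evaluation pattern; the array shapes and the synthetic stand-in data are assumptions for illustration only.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

rng = np.random.RandomState(1337)
n, time_length, data_dim = 200, 48, 35                    # Physionet-style shapes (assumed)
x_imputed = rng.normal(size=(n, time_length, data_dim))   # stand-in for the model's imputations
y = rng.randint(0, 2, size=n)                             # stand-in binary labels

# Flatten each sequence, fit the probe on one half, score on the other.
x_flat = x_imputed.reshape([-1, time_length * data_dim])
split = len(x_flat) // 2
cls_model = LogisticRegression(solver='liblinear', tol=1e-10, max_iter=10000)
cls_model.fit(x_flat[:split], y[:split])
probs = cls_model.predict_proba(x_flat[split:])[:, 1]
print("AUROC: {:.4f}".format(roc_auc_score(y[split:], probs)))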
IMS-Bio2Core-Facility/single_snake_sequencing | [
"dddf8fc8960da8938484f98a4ea94f74a4fd5b18"
]
| [
"workflow/scripts/dim_reduc.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"Dimensional reduction and batch correction using Harmony.\"\"\"\nif __name__ == \"__main__\":\n import anndata as ad\n import matplotlib.pyplot as plt\n import scanpy as sc\n import scanpy.external as sce\n from helpers.logs.get_logger import get_logger\n from helpers.logs.sc_logs import set_sc_log\n from helpers.select_pcs import select_pcs\n\n LOG = snakemake.log[0] # noqa: F821\n PARAMS = snakemake.params # noqa: F821\n INPUT = snakemake.input # noqa: F821\n OUTPUT = snakemake.output # noqa: F821\n THREADS = snakemake.threads # noqa: F821\n\n logger = get_logger(__name__, LOG)\n sc.settings = set_sc_log(sc.settings, logfile=LOG)\n sc.settings.n_jobs = THREADS\n\n # Concatenate samples\n adata = ad.concat(\n [sc.read_h5ad(path) for path in INPUT[\"data\"]],\n join=\"outer\",\n merge=\"same\",\n label=None,\n )\n adata.obs_names_make_unique()\n logger.info(f\"Adata read from {INPUT['data']}\")\n logger.info(f\"Input data: {adata}\")\n\n # HVGs\n # Before normalisation as seurat_v3 expects raw counts\n if not PARAMS[\"nHVG\"]:\n nHVG = max(min(len(adata.obs) / 2, 10000), 1000)\n logger.info(f\"nHVG not provided. Using {nHVG}.\")\n sc.pp.highly_variable_genes(\n adata,\n n_top_genes=nHVG,\n flavor=\"seurat_v3\",\n batch_key=\"lane\",\n subset=False,\n )\n _ = sc.pl.highly_variable_genes(\n adata,\n log=False,\n show=False,\n save=False,\n )\n plt.savefig(OUTPUT[\"hvg\"], dpi=300, bbox_inches=\"tight\")\n plt.close()\n\n # Normalise\n # Exclude highly expressed to prevent skew of normalisation\n sc.pp.normalize_total(adata, exclude_highly_expressed=True)\n sc.pp.log1p(adata)\n\n # Save raw and filter\n adata.raw = adata\n\n # Regress and scale\n # No batch - covered with bbknn\n sc.pp.regress_out(adata, [\"total_counts\", \"pct_counts_mt\"], n_jobs=None)\n sc.pp.scale(adata, max_value=10)\n\n # PCA\n sc.tl.pca(adata, n_comps=50, use_highly_variable=True)\n _ = sc.pl.pca_variance_ratio(adata, n_pcs=50, show=False, save=False)\n plt.savefig(OUTPUT[\"elbow\"], dpi=300, bbox_inches=\"tight\")\n plt.close()\n\n # Harmony for batch correction\n # As it runs on all pcs include, we must first filter to desired\n npc, adata = select_pcs(adata, threshold=PARAMS[\"var_thresh\"])\n logger.info(f\"{npc} PCs used.\")\n\n sce.pp.harmony_integrate(\n adata,\n key=\"lane\",\n adjusted_basis=\"X_harmony\",\n max_iter_harmony=50,\n )\n\n # And save\n adata.write_h5ad(OUTPUT[\"data\"])\n"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close"
]
]
|
ekm507/think-python | [
"23aa36e66042fca0512320656513460089e230ec"
]
| [
"dft.py"
]
| [
"import numpy as np\nfrom numpy import pi\nn = 100\nN = 100\na = np.zeros(n)\nb = np.linspace(-10, 10, n)\n\nc = 4 * np.sin(b) + 5 * np.sin(2 * b)\n\nfor i in range(n):\n d = np.sin(b * 2 * pi * i)\n a[i] = np.sum(d)\n\n\nfor i in range(n):\n if(abs(a[i]) > 0.1):\n print(i, a[i])"
]
| [
[
"numpy.sum",
"numpy.linspace",
"numpy.sin",
"numpy.zeros"
]
]
|
t0kk35/d373c7 | [
"7780b97545e581244fb4fb74347bb1b052b9ec3f"
]
| [
"test/pytorch/history_tests.py"
]
| [
"\"\"\"\nUnit Tests for PyTorch History Package\n(c) 2020 d373c7\n\"\"\"\nimport unittest\nimport torch\nimport torch.utils.data as data\n# noinspection PyProtectedMember\nfrom d373c7.pytorch.common import _History\n# inspection PyProtectedMember\nfrom math import ceil\n\nFILES_DIR = './files/'\n\n\nclass BaseHistoryCases(unittest.TestCase):\n def test_create_base(self):\n bs = 1\n t = torch.Tensor([[0.0, 0.1], [0.2, 0.3]])\n ds = data.TensorDataset(t)\n dl = data.DataLoader(ds, batch_size=bs)\n h = _History(dl)\n self.assertEqual(h.samples, t.shape[0], f'Sample size should have been {t.shape[0]}')\n self.assertEqual(h.batch_size, bs, f'Batch size should have been {bs}')\n self.assertEqual(h.epoch, 0, f'Initial epoch should be 0. Was {h.epoch}')\n self.assertEqual(h.steps, ceil(h.samples/bs), f'Number of steps should have been. {ceil(h.samples/bs)}')\n self.assertEqual(h.step, 0, f'Current step should be 0. Got {h.step}')\n # self.assertIsInstance(h.history, Dict, f'History object should have been a Dict {type(h.history)}')\n # self.assertListEqual(sorted(list(h.history.keys())), sorted(metrics), 'History object have metric keys')\n # for k, v in h.history.items():\n # self.assertIsInstance(v, List, f'Metric values should be a list {type(v)}')\n # self.assertEqual(len(v), 0, f'Metric values should have been empty')\n\n def test_work(self):\n bs = 1\n t = torch.Tensor([[0.0, 1.0], [0.0, 1.0]])\n ds = data.TensorDataset(t)\n dl = data.DataLoader(ds, batch_size=bs)\n h = _History(dl)\n h.start_epoch()\n self.assertEqual(h.epoch, 1, f'Current Epoch should have been 1. Was {h.epoch}')\n self.assertEqual(h.step, 0, f'Current step should have been 0. Was {h.step}')\n h.start_step()\n self.assertEqual(h.epoch, 1, f'Current Epoch should have been 1. Was {h.epoch}')\n self.assertEqual(h.step, 1, f'Current Step should have been 1. Was {h.step}')\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.Tensor",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader"
]
]
|
prraoo/TrafficMonitoring | [
"d2bb4aca0f7250863e141c58081f96e726960631"
]
| [
"WGAN/dataloader_traffic.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torchvision import datasets, transforms\n\n# Transforming images to with a common size\n\n#transform = transforms.Compose([transforms.Resize(255),\n# transforms.CenterCrop(224),\n# transforms.ToTensor()]\n# )\n\ntransform = transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ndataset = datasets.ImageFolder(\"data/WGAN/train/\", transform=transform)\n\ndataloader_traffic = torch.utils.data.DataLoader(dataset, batch_size=256, shuffle=True)\n\nimages, labels = next(iter(dataloader_traffic))\n\n\n# Use this if you want to\n\n# plt.imshow(np.transpose(images[0].numpy(),(1,2,0)))\n# plt.show()\n"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
denisov-vlad/socceraction | [
"0b03bc230bebb7e830d65aa231d961b879b828fb"
]
| [
"socceraction/vaep/base.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"Implements the VAEP framework.\n\nAttributes\n----------\nxfns_default : list(callable)\n The default VAEP features.\n\n\"\"\"\nimport math\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.metrics import brier_score_loss, roc_auc_score\n\nimport socceraction.spadl.config as spadlcfg\n\nfrom . import features as fs\nfrom . import formula as vaep\nfrom . import labels as lab\n\ntry:\n import xgboost\nexcept ImportError:\n xgboost = None\ntry:\n import catboost\nexcept ImportError:\n catboost = None\ntry:\n import lightgbm\nexcept ImportError:\n lightgbm = None\n\n\nxfns_default = [\n fs.actiontype_onehot,\n fs.result_onehot,\n fs.actiontype_result_onehot,\n fs.bodypart_onehot,\n fs.time,\n fs.startlocation,\n fs.endlocation,\n fs.startpolar,\n fs.endpolar,\n fs.movement,\n fs.team,\n fs.time_delta,\n fs.space_delta,\n fs.goalscore,\n]\n\n\nclass VAEP:\n \"\"\"\n An implementation of the VAEP framework [Decroos19]_.\n\n VAEP (Valuing Actions by Estimating Probabilities) defines the problem of\n valuing a soccer player's contributions within a match as a binary\n classification problem and rates actions by estimating its effect on the\n short-term probablities that a team will both score and concede.\n\n Parameters\n ----------\n xfns : list\n List of feature transformers (see :mod:`socceraction.vaep.features`)\n used to describe the game states. Uses :attr:`~socceraction.vaep.base.xfns_default`\n if None.\n nb_prev_actions : int, default=3\n Number of previous actions used to decscribe the game state.\n\n\n .. [Decroos19] Decroos, Tom, Lotte Bransen, Jan Van Haaren, and Jesse Davis.\n \"Actions speak louder than goals: Valuing player actions in soccer.\" In\n Proceedings of the 25th ACM SIGKDD International Conference on Knowledge\n Discovery & Data Mining, pp. 1851-1861. 
2019.\n \"\"\"\n\n _spadlcfg = spadlcfg\n _fs = fs\n _lab = lab\n _vaep = vaep\n\n def __init__(\n self,\n xfns: Optional[List[Callable[[List[pd.DataFrame]], pd.DataFrame]]] = None,\n nb_prev_actions: int = 3,\n ):\n self.__models: Dict[str, Any] = {}\n self.xfns = xfns_default if xfns is None else xfns\n self.yfns = [self._lab.scores, self._lab.concedes]\n self.nb_prev_actions = nb_prev_actions\n\n def compute_features(self, game: pd.Series, game_actions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Transform actions to the feature-based representation of game states.\n\n Parameters\n ----------\n game : pd.Series\n The SPADL representation of a single game.\n game_actions : pd.DataFrame\n The actions performed during `game` in the SPADL representation.\n\n Returns\n -------\n features : pd.DataFrame\n Returns the feature-based representation of each game state in the game.\n \"\"\"\n game_actions_with_names = self._spadlcfg.add_names(game_actions)\n gamestates = self._fs.gamestates(game_actions_with_names, self.nb_prev_actions)\n gamestates = self._fs.play_left_to_right(gamestates, game.home_team_id)\n return pd.concat([fn(gamestates) for fn in self.xfns], axis=1)\n\n def compute_labels(\n self, game: pd.Series, game_actions: pd.DataFrame\n ) -> pd.DataFrame: # pylint: disable=W0613\n \"\"\"\n Compute the labels for each game state in the given game.\n\n Parameters\n ----------\n game : pd.Series\n The SPADL representation of a single game.\n game_actions : pd.DataFrame\n The actions performed during `game` in the SPADL representation.\n\n Returns\n -------\n labels : pd.DataFrame\n Returns the labels of each game state in the game.\n \"\"\"\n game_actions_with_names = self._spadlcfg.add_names(game_actions)\n return pd.concat([fn(game_actions_with_names) for fn in self.yfns], axis=1)\n\n def fit(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n learner: str = 'xgboost',\n val_size: float = 0.25,\n tree_params: Optional[Dict[str, Any]] = None,\n fit_params: Optional[Dict[str, Any]] = None,\n ) -> 'VAEP':\n \"\"\"\n Fit the model according to the given training data.\n\n Parameters\n ----------\n X : pd.DataFrame\n Feature representation of the game states.\n y : pd.DataFrame\n Scoring and conceding labels for each game state.\n learner : string, default='xgboost'\n Gradient boosting implementation which should be used to learn the\n model. The supported learners are 'xgboost', 'catboost' and 'lightgbm'.\n val_size : float, default=0.25\n Percentage of the dataset that will be used as the validation set\n for early stopping. 
When zero, no validation data will be used.\n tree_params : dict\n Parameters passed to the constructor of the learner.\n fit_params : dict\n Parameters passed to the fit method of the learner.\n\n Returns\n -------\n self\n Fitted VAEP model.\n\n \"\"\"\n nb_states = len(X)\n idx = np.random.permutation(nb_states)\n # fmt: off\n train_idx = idx[:math.floor(nb_states * (1 - val_size))]\n val_idx = idx[(math.floor(nb_states * (1 - val_size)) + 1):]\n # fmt: on\n\n # filter feature columns\n cols = self._fs.feature_column_names(self.xfns, self.nb_prev_actions)\n if not set(cols).issubset(set(X.columns)):\n missing_cols = ' and '.join(set(cols).difference(X.columns))\n raise ValueError('{} are not available in the features dataframe'.format(missing_cols))\n\n # split train and validation data\n X_train, y_train = X.iloc[train_idx][cols], y.iloc[train_idx]\n X_val, y_val = X.iloc[val_idx][cols], y.iloc[val_idx]\n\n # train classifiers F(X) = Y\n for col in list(y.columns):\n eval_set = [(X_val, y_val[col])] if val_size > 0 else None\n if learner == 'xgboost':\n self.__models[col] = self._fit_xgboost(\n X_train, y_train[col], eval_set, tree_params, fit_params\n )\n elif learner == 'catboost':\n self.__models[col] = self._fit_catboost(\n X_train, y_train[col], eval_set, tree_params, fit_params\n )\n elif learner == 'lightgbm':\n self.__models[col] = self._fit_lightgbm(\n X_train, y_train[col], eval_set, tree_params, fit_params\n )\n else:\n raise ValueError('A {} learner is not supported'.format(learner))\n return self\n\n def _fit_xgboost(\n self,\n X: pd.DataFrame,\n y: pd.Series,\n eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,\n tree_params: Optional[Dict[str, Any]] = None,\n fit_params: Optional[Dict[str, Any]] = None,\n ) -> 'xgboost.XGBClassifier':\n if xgboost is None:\n raise ImportError('xgboost is not installed.')\n # Default settings\n if tree_params is None:\n tree_params = dict(n_estimators=100, max_depth=3)\n if fit_params is None:\n fit_params = dict(eval_metric='auc', verbose=True)\n if eval_set is not None:\n val_params = dict(early_stopping_rounds=10, eval_set=eval_set)\n fit_params = {**fit_params, **val_params}\n # Train the model\n model = xgboost.XGBClassifier(**tree_params)\n return model.fit(X, y, **fit_params)\n\n def _fit_catboost(\n self,\n X: pd.DataFrame,\n y: pd.Series,\n eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,\n tree_params: Optional[Dict[str, Any]] = None,\n fit_params: Optional[Dict[str, Any]] = None,\n ) -> 'catboost.CatBoostClassifier':\n if catboost is None:\n raise ImportError('catboost is not installed.')\n # Default settings\n if tree_params is None:\n tree_params = dict(eval_metric='BrierScore', loss_function='Logloss', iterations=100)\n if fit_params is None:\n is_cat_feature = [c.dtype.name == 'category' for (_, c) in X.iteritems()]\n fit_params = dict(\n cat_features=np.nonzero(is_cat_feature)[0].tolist(),\n verbose=True,\n )\n if eval_set is not None:\n val_params = dict(early_stopping_rounds=10, eval_set=eval_set)\n fit_params = {**fit_params, **val_params}\n # Train the model\n model = catboost.CatBoostClassifier(**tree_params)\n return model.fit(X, y, **fit_params)\n\n def _fit_lightgbm(\n self,\n X: pd.DataFrame,\n y: pd.Series,\n eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,\n tree_params: Optional[Dict[str, Any]] = None,\n fit_params: Optional[Dict[str, Any]] = None,\n ) -> 'lightgbm.LGBMClassifier':\n if lightgbm is None:\n raise ImportError('lightgbm is not installed.')\n if 
tree_params is None:\n tree_params = dict(n_estimators=100, max_depth=3)\n if fit_params is None:\n fit_params = dict(eval_metric='auc', verbose=True)\n if eval_set is not None:\n val_params = dict(early_stopping_rounds=10, eval_set=eval_set)\n fit_params = {**fit_params, **val_params}\n # Train the model\n model = lightgbm.LGBMClassifier(**tree_params)\n return model.fit(X, y, **fit_params)\n\n def _estimate_probabilities(self, X: pd.DataFrame) -> pd.DataFrame:\n # filter feature columns\n cols = self._fs.feature_column_names(self.xfns, self.nb_prev_actions)\n if not set(cols).issubset(set(X.columns)):\n missing_cols = ' and '.join(set(cols).difference(X.columns))\n raise ValueError('{} are not available in the features dataframe'.format(missing_cols))\n\n Y_hat = pd.DataFrame()\n for col in self.__models:\n Y_hat[col] = [p[1] for p in self.__models[col].predict_proba(X[cols])]\n return Y_hat\n\n def rate(\n self, game: pd.Series, game_actions: pd.DataFrame, game_states: pd.DataFrame = None\n ) -> pd.DataFrame:\n \"\"\"\n Compute the VAEP rating for the given game states.\n\n Parameters\n ----------\n game : pd.Series\n The SPADL representation of a single game.\n game_actions : pd.DataFrame\n The actions performed during `game` in the SPADL representation.\n game_states : pd.DataFrame, default=None\n DataFrame with the game state representation of each action. If\n `None`, these will be computed on-th-fly.\n\n Returns\n -------\n ratings : pd.DataFrame\n Returns the VAEP rating for each given action, as well as the\n offensive and defensive value of each action.\n \"\"\"\n if not self.__models:\n raise NotFittedError()\n\n game_actions_with_names = self._spadlcfg.add_names(game_actions)\n if game_states is None:\n game_states = self.compute_features(game, game_actions)\n\n y_hat = self._estimate_probabilities(game_states)\n p_scores, p_concedes = y_hat.scores, y_hat.concedes\n vaep_values = self._vaep.value(game_actions_with_names, p_scores, p_concedes)\n return pd.concat([game_actions, vaep_values], axis=1)\n\n def score(self, X: pd.DataFrame, y: pd.DataFrame) -> Dict[str, Dict[str, float]]:\n \"\"\"Evaluate the fit of the model on the given test data and labels.\n\n Parameters\n ----------\n X : pd.DataFrame\n Feature representation of the game states.\n y : pd.DataFrame\n Scoring and conceding labels for each game state.\n\n Returns\n -------\n score : dict\n The Brier and AUROC scores for both binary classification problems.\n \"\"\"\n if not self.__models:\n raise NotFittedError()\n\n y_hat = self._estimate_probabilities(X)\n\n scores: Dict[str, Dict[str, float]] = {}\n for col in self.__models:\n scores[col] = {}\n scores[col]['brier'] = brier_score_loss(y[col], y_hat[col])\n scores[col]['auroc'] = roc_auc_score(y[col], y_hat[col])\n\n return scores\n"
]
| [
[
"pandas.DataFrame",
"numpy.random.permutation",
"sklearn.metrics.brier_score_loss",
"numpy.nonzero",
"pandas.concat",
"sklearn.exceptions.NotFittedError",
"sklearn.metrics.roc_auc_score"
]
]
|
Bram94/lapjv | [
"98f76102255f81099383a14bc5ce6c2473d12427"
]
| [
"lapjv/tests/test_lapmod.py"
]
| [
"from pytest import mark, fixture, raises\n\nimport numpy as np\nfrom lapjv import lapjv, lapmod\n\nfrom .test_utils import (\n get_dense_8x8_int,\n get_dense_100x100_int, get_dense_100x100_int_hard, get_sparse_100x100_int,\n get_dense_1kx1k_int, get_dense_1kx1k_int_hard, get_sparse_1kx1k_int,\n get_sparse_4kx4k_int,\n get_dense_eps,\n get_platform_maxint,\n sparse_from_dense, sparse_from_masked\n)\n\n\[email protected]('cost,expected', [\n (np.array([[1000, 2, 11, 10, 8, 7, 6, 5],\n [6, 1000, 1, 8, 8, 4, 6, 7],\n [5, 12, 1000, 11, 8, 12, 3, 11],\n [11, 9, 10, 1000, 1, 9, 8, 10],\n [11, 11, 9, 4, 1000, 2, 10, 9],\n [12, 8, 5, 2, 11, 1000, 11, 9],\n [10, 11, 12, 10, 9, 12, 1000, 3],\n [10, 10, 10, 10, 6, 3, 1, 1000]]),\n (17.0, [1, 2, 0, 4, 5, 3, 7, 6], [2, 0, 1, 5, 3, 4, 7, 6])),\n # Solved in column reduction.\n (np.array([[1000, 4, 1],\n [1, 1000, 3],\n [5, 1, 1000]]),\n (3., [2, 0, 1], [1, 2, 0])),\n # Solved in augmenting row reduction.\n (np.array([[5, 1000, 3],\n [1000, 2, 2],\n [1, 5, 1000]]),\n (6., [2, 1, 0], [2, 1, 0])),\n # Needs augmentating row reduction - only a single row previously assigned.\n (np.array([[1000, 1000+1, 1000],\n [1000, 1000, 1000+1],\n [1, 2, 3]]),\n (1000+1000+1., [2, 1, 0], [2, 1, 0])),\n # Triggers the trackmate bug\n # Solution is ambiguous, [1, 0, 2] gives the same cost, depends on whether\n # in column reduction columns are iterated over from largest to smallest or\n # the other way around.\n (np.array([[10, 10, 13],\n [4, 8, 8],\n [8, 5, 8]]),\n (13+4+5, [2, 0, 1], [1, 2, 0])),\n (np.array([[11, 10, 6],\n [10, 11, 11],\n [11, 12, 15]]),\n (6+10+12, [2, 0, 1], [1, 2, 0])),\n (np.array([[12, 4, 9],\n [16, 15, 14],\n [19, 13, 17]]),\n (4+16+17, [1, 0, 2], [1, 0, 2])),\n (np.array([[2, 5, 7],\n [7, 10, 12],\n [1, 5, 9]]),\n (7+10+1, [2, 1, 0], [2, 1, 0])),\n # This triggered error in augmentation.\n (np.array([[10, 6, 14, 1],\n [17, 18, 17, 15],\n [14, 17, 15, 8],\n [11, 13, 11, 4]]),\n (6+17+14+4, [1, 2, 0, 3], [2, 0, 1, 3])),\n # Test matrix from centrosome\n (np.array([[10, 10, 13],\n [4, 8, 8],\n [8, 5, 8]]),\n (22., [2, 0, 1], [1, 2, 0])),\n # Test matrix from centrosome\n (np.array([[2, 5, 7],\n [7, 10, 12],\n [1, 5, 9]]),\n (18., [2, 1, 0], [2, 1, 0])),\n ])\ndef test_square(cost, expected):\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == len(expected)\n assert cost[range(cost.shape[0]), ret[1]].sum() == ret[0]\n assert cost[ret[2], range(cost.shape[1])].sum() == ret[0]\n assert ret[0] == expected[0]\n assert np.all(ret[1] == expected[1])\n assert np.all(ret[2] == expected[2])\n dense_ret = lapjv(cost)\n assert ret[0] == dense_ret[0]\n assert np.all(ret[1] == dense_ret[1])\n assert np.all(ret[2] == dense_ret[2])\n\n\[email protected]('cost,expected', [\n (np.array([[11., 20., np.inf, np.inf, np.inf],\n [12., np.inf, 12., np.inf, np.inf],\n [np.inf, 11., 10., 15., 9.],\n [15., np.inf, np.inf, 22., np.inf],\n [13., np.inf, np.inf, np.inf, 15.]], dtype=float),\n (11+12+11+22+15, [0, 2, 1, 3, 4], [0, 2, 1, 3, 4])),\n ])\ndef test_sparse_square(cost, expected):\n ret = lapmod(*sparse_from_masked(cost))\n assert len(ret) == len(expected)\n assert cost[range(cost.shape[0]), ret[1]].sum() == ret[0]\n assert cost[ret[2], range(cost.shape[1])].sum() == ret[0]\n assert ret[0] == expected[0]\n assert np.all(ret[1] == expected[1])\n assert np.all(ret[2] == expected[2])\n dense_ret = lapjv(cost)\n assert ret[0] == dense_ret[0]\n assert np.all(ret[1] == dense_ret[1])\n assert np.all(ret[2] == dense_ret[2])\n\n\n# This test triggers a possibly 
infinite loop in ARR.\[email protected](60)\ndef test_infs_unsolvable():\n cost = np.array([[0., 0., 0., np.inf, np.inf],\n [np.inf, np.inf, np.inf, 0., 0.],\n [np.inf, np.inf, np.inf, 0., 0.],\n [np.inf, np.inf, np.inf, 0., 0.],\n [0., 0., 0., np.inf, np.inf]], dtype=float)\n lapjv_ret = lapjv(cost)\n assert lapjv_ret[0] == np.inf\n ret = lapmod(*sparse_from_masked(cost))\n assert len(ret) == 3\n assert ret[0] == np.inf\n\n cost = np.array([[19., 22., 16., np.inf, np.inf],\n [np.inf, np.inf, np.inf, 4., 13.],\n [np.inf, np.inf, np.inf, 3., 14.],\n [np.inf, np.inf, np.inf, 10., 12.],\n [11., 14., 13., np.inf, np.inf]], dtype=float)\n lapjv_ret = lapjv(cost)\n assert lapjv_ret[0] == np.inf\n ret = lapmod(*sparse_from_masked(cost))\n assert len(ret) == 3\n assert ret[0] == np.inf\n\n\ndef test_inf_unique():\n cost = np.array([[1000, 4, 1],\n [1, 1000, 3],\n [5, 1, 1000]])\n cost_ext = np.empty((4, 4))\n cost_ext[:] = np.inf\n cost_ext[:3, :3] = cost\n cost_ext[3, 3] = 0\n with raises(ValueError):\n ret = lapmod(*sparse_from_dense(cost_ext))\n ret = lapmod(*sparse_from_masked(cost_ext))\n assert len(ret) == 3\n assert ret[0] == 3.\n assert np.all(ret[1] == [2, 0, 1, 3])\n\n\[email protected](2)\ndef test_inf_col():\n cost = np.array([[0., np.inf, 0., 0., np.inf],\n [np.inf, np.inf, 0., 0., 0.],\n [np.inf, np.inf, np.inf, 0., np.inf],\n [np.inf, np.inf, np.inf, 0., 0.],\n [0., np.inf, 0., np.inf, np.inf]], dtype=float)\n with raises(ValueError):\n ret = lapmod(*sparse_from_dense(cost))\n ret = lapmod(*sparse_from_masked(cost))\n assert len(ret) == 3\n assert ret[0] == np.inf\n\n\[email protected](2)\ndef test_inf_row():\n cost = np.array([[0., 0., 0., 0., np.inf],\n [np.inf, np.inf, 0., 0., 0.],\n [np.inf, np.inf, np.inf, np.inf, np.inf],\n [np.inf, np.inf, np.inf, 0., 0.],\n [0., 0., 0., np.inf, np.inf]], dtype=float)\n with raises(ValueError):\n ret = lapmod(*sparse_from_dense(cost))\n ret = lapmod(*sparse_from_masked(cost))\n assert len(ret) == 3\n assert ret[0] == np.inf\n\n\ndef test_all_inf():\n cost = np.empty((5, 5), dtype=float)\n cost[:] = np.inf\n with raises(ValueError):\n lapmod(*sparse_from_dense(cost))\n with raises(ValueError):\n lapmod(*sparse_from_masked(cost))\n\n\n@fixture\ndef dense_8x8_int():\n return get_dense_8x8_int()\n\n\n@fixture\ndef dense_100x100_int():\n return get_dense_100x100_int()\n\n\n@fixture\ndef dense_100x100_int_hard():\n return get_dense_100x100_int_hard()\n\n\n@fixture\ndef sparse_100x100_int():\n return get_sparse_100x100_int()\n\n\n@fixture\ndef dense_1kx1k_int():\n return get_dense_1kx1k_int()\n\n\n@fixture\ndef dense_1kx1k_int_hard():\n return get_dense_1kx1k_int_hard()\n\n\n@fixture\ndef sparse_1kx1k_int():\n return get_sparse_1kx1k_int()\n\n\n@fixture\ndef sparse_4kx4k_int():\n return get_sparse_4kx4k_int()\n\n\n@fixture\ndef dense_eps():\n return get_dense_eps()\n\n\[email protected](60)\ndef test_eps(dense_eps):\n cost, opt = dense_eps\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == 3\n assert ret[0] == opt\n\n\ndef test_dense_100x100_int(dense_100x100_int):\n cost, opt = dense_100x100_int\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == 3\n assert ret[0] == opt\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n\n\ndef test_dense_100x100_int_hard(dense_100x100_int_hard):\n cost, opt = dense_100x100_int_hard\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == 3\n assert ret[0] == opt\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n\n\n# TODO: too sparse unsolvable matrices cause sometimne 
IndexError, easily\n# generated - just set the mask threshold low enough\ndef test_sparse_100x100_int(sparse_100x100_int):\n cost, mask, opt = sparse_100x100_int\n ret = lapmod(*sparse_from_masked(cost, mask))\n assert len(ret) == 3\n assert ret[0] == opt\n\n\[email protected](60)\ndef test_dense_1kx1k_int(dense_1kx1k_int):\n cost, opt = dense_1kx1k_int\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == 3\n assert ret[0] == opt\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n\n\[email protected](60)\ndef test_dense_1kx1k_int_hard(dense_1kx1k_int_hard):\n cost, opt = dense_1kx1k_int_hard\n ret = lapmod(*sparse_from_dense(cost))\n assert len(ret) == 3\n assert ret[0] == opt\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n\n\[email protected](60)\ndef test_sparse_1kx1k_int(sparse_1kx1k_int):\n cost, mask, opt = sparse_1kx1k_int\n ret = lapmod(*sparse_from_masked(cost, mask))\n assert len(ret) == 3\n assert ret[0] == opt\n cost[~mask] = get_platform_maxint()\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n\n\[email protected](60)\ndef test_sparse_4kx4k_int(sparse_4kx4k_int):\n cost, mask, opt = sparse_4kx4k_int\n ret = lapmod(*sparse_from_masked(cost, mask))\n assert len(ret) == 3\n assert ret[0] == opt\n cost[~mask] = get_platform_maxint()\n lapjv_ret = lapjv(cost)\n assert ret[0] == lapjv_ret[0]\n"
]
| [
[
"numpy.all",
"numpy.array",
"numpy.empty"
]
]
|
karthiks1701/lio | [
"f80487b73f248f5a3631b92a56f8db42d2c86d8a"
]
| [
"lio/env/ssd_discrete_reward.py"
]
| [
"\"\"\"Wrapper around Sequential Social Dilemma environment.\n\nThis is used for running baseline methods (e.g., policy gradient and LOLA)\nwhere the new discrete action space is {original actions} X {reward-giving action}.\n\"\"\"\nimport numpy as np\n\nfrom lio.env import ssd\n\n\nclass Env(ssd.Env):\n\n def __init__(self, config_env):\n\n super().__init__(config_env)\n\n # Allow simultaneous move and give-reward\n # The second half of action range is interpreted as\n # (base action and give-reward-to-the-other-agent)\n # Only support 2-player for now\n assert self.n_agents == 2\n self.reward_coeff = self.config.reward_coeff # cost multiplier\n self.reward_value = self.config.reward_value\n self.idx_recipient = self.config.idx_recipient if self.config.asymmetric else None\n self.l_action_base = self.l_action\n self.l_action = 2 * self.l_action_base\n\n def step(self, actions):\n \"\"\"Takes a step in env.\n \n Args:\n actions: list of integers\n\n Returns:\n List of observations, list of rewards, done, info\n \"\"\"\n # The second half of action range is interpreted as\n # simultaneous (base action and give-reward) \n actions = [self.map_to_orig[a % self.l_action_base] for a in actions]\n actions_dict = {'agent-%d'%idx : actions[idx]\n for idx in range(self.n_agents)}\n\n # all objects returned by env.step are dicts\n obs_next, rewards, dones, info = self.env.step(actions_dict)\n self.steps += 1\n\n obs_next = self.process_obs(obs_next)\n rewards = list(rewards.values())\n if self.cleaning_penalty > 0:\n for idx in range(self.n_agents):\n if actions[idx] == 8:\n rewards[idx] -= self.cleaning_penalty\n\n # Store the extrinsic rewards here for use in evaluation,\n # separately from the modifications due to reward-giving actions below\n info['rewards_env'] = np.array(rewards)\n\n for agent_id in range(self.n_agents):\n if self.config.asymmetric and agent_id == self.idx_recipient:\n continue\n # Agent exercised its reward-giving action\n if actions[agent_id] >= self.l_action_base:\n rewards[agent_id] -= self.reward_coeff * self.reward_value\n # Assumes N=2\n rewards[1-agent_id] += self.reward_value\n\n done = dones['__all__'] or self.steps == self.max_steps\n\n return obs_next, rewards, done, info\n"
]
| [
[
"numpy.array"
]
]
|
wuxiaobai24/DA-RNN | [
"de5ee8699024cb97d1f42ca120dc687b9c7bc2bb"
]
| [
"main.py"
]
| [
"import time\nfrom torch import mode\nfrom data import NasdaqDataset\nfrom model import DARNN\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\nfrom tqdm import tqdm\nimport argparse\n\n\n\ndef MAE(pred, target):\n return (pred - target).abs().mean()\n\n\ndef MSE(pred, target):\n return torch.pow(pred - target, 2).mean()\n\n\ndef RMSE(pred, target):\n return MSE(pred, target).sqrt()\n\n\ndef MAPE(pred, target):\n return ((pred - target).abs() / (target.abs() + 1e-8)).mean()\n\nparser = argparse.ArgumentParser(description='DA-RNN')\nparser.add_argument('--path', type=str, default='./dataset/nasdaq100/small/nasdaq100_padding.csv')\nparser.add_argument('--batchsize', type=int, default=128)\nparser.add_argument('--encoder_hidden', type=int, default=128)\nparser.add_argument('--decoder_hidden', type=int, default=128)\nparser.add_argument('--timestep', type=int, default=10)\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--lr', type=float, default=0.01)\n\nargs = parser.parse_args()\nprint(args)\n# constans parameters\nN_FEATURE = 81\nN_ENCODER_HIDDEN = args.encoder_hidden\nN_DECODER_HIDDEN = args.decoder_hidden\nN_TAGET = 1\nT = args.timestep\nBATCH_SIZE = args.batchsize\nN_EPOCHS = args.epochs\nSEED = 24\nLEARNING_RATE = args.lr\n\n# set seed\n\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\n\nPATH = './nasdaq100_padding.csv'\ntrain_data = NasdaqDataset(PATH, T, 'train')\nval_data = NasdaqDataset(PATH, T, 'val', scaler=train_data.scaler, \n target_scaler=train_data.target_scaler)\ntest_data = NasdaqDataset(PATH, T, 'test', scaler=train_data.scaler,\n target_scaler=train_data.target_scaler)\n\nprint(\"Train data's len is\", len(train_data))\nprint(\"Val data's len is\", len(val_data))\nprint(\"Test data's len is\", len(test_data))\n\ntrain_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)\nval_loader = DataLoader(val_data, batch_size=BATCH_SIZE)\ntest_loader = DataLoader(test_data, batch_size=BATCH_SIZE)\n\n\n# encoder = InputAttnEncoder(N_FEATURE, N_ENCODER_HIDDEN, T)\n# decoder = TemporalAttenDecoder(N_ENCODER_HIDDEN, N_TAGET, N_DECODER_HIDDEN, T)\nuse_cuda = torch.cuda.is_available()\ndevice = 'cuda' if use_cuda else 'cpu'\nprint('device is', device)\n\nmodel = DARNN(N_FEATURE, N_TAGET, N_ENCODER_HIDDEN,\n N_DECODER_HIDDEN, T).to(device)\n\nloss_func = torch.nn.MSELoss()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n\nfor epoch in range(1, N_EPOCHS + 1):\n model.train()\n\n pbar = tqdm(enumerate(train_loader), total=len(train_loader))\n start_time = time.time()\n train_losses = []\n for i, data in pbar:\n (feat, target), y = data\n if use_cuda:\n feat = feat.cuda()\n target = target.cuda()\n y = y.cuda()\n prepare_time = start_time - time.time()\n\n optimizer.zero_grad()\n pred = model(feat, target)\n loss = loss_func(pred.view(-1), y)\n train_losses.append(loss.item())\n loss.backward()\n optimizer.step()\n\n process_time = start_time-time.time()-prepare_time\n pbar.set_description(\"Compute efficiency: {:.2f}, epoch: {}/{}\".format(\n process_time/(process_time + prepare_time), epoch, N_EPOCHS + 1))\n start_time = time.time()\n\n if epoch % 1 == 0:\n model.eval()\n pbar = tqdm(enumerate(val_loader), total=len(val_loader))\n losses = {\n 'MAE': [],\n 'RMSE': [],\n 'MAPE': []\n }\n for i, data in pbar:\n (feat, target), y = data\n if use_cuda:\n feat = feat.cuda()\n target = target.cuda()\n y = y.cuda()\n pred = model(feat, target)\n\n 
losses['MAE'].append(MAE(pred, y.view(-1)).item())\n            losses['RMSE'].append(RMSE(pred, y.view(-1)).item())\n            losses['MAPE'].append(MAPE(pred, y.view(-1)).item())\n        \n        print('Epoch {:d}: MAE = {:.2f}, RMSE = {:.2f}, MAPE = {:.2f}'.format(\n            epoch, np.mean(losses['MAE']),\n            np.mean(losses['RMSE']),\n            np.mean(losses['MAPE'])))\n\npbar = tqdm(enumerate(test_loader), total=len(test_loader))\nlosses = {\n    'MAE': [],\n    'RMSE': [],\n    'MAPE': []\n}\nfor i, data in pbar:\n    (feat, target), y = data\n    if use_cuda:\n        feat = feat.cuda()\n        target = target.cuda()\n        y = y.cuda()\n    pred = model(feat, target)\n    losses['MAE'].append(MAE(pred, y.view(-1)).item())\n    losses['RMSE'].append(RMSE(pred, y.view(-1)).item())\n    losses['MAPE'].append(MAPE(pred, y.view(-1)).item())\nprint('Test: MAE = {:.2f}, RMSE = {:.2f}, MAPE = {:.2f}'.format(\n    np.mean(losses['MAE']),\n    np.mean(losses['RMSE']),\n    np.mean(losses['MAPE'])))\n"
]
| [
[
"torch.cuda.manual_seed",
"torch.nn.MSELoss",
"numpy.random.seed",
"numpy.mean",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.pow"
]
]
|
matbocz/kurs-python-pwsz-elblag | [
"47a6f02691dfcffe8de346923099d13ffac4ffa4"
]
| [
"PythonLab7/PythonLab7Zad1i2.py"
]
| [
"from cv2 import cv2\nimport numpy as np\nimport time\n\n\ndef mouse_callback(event, x, y, flags, param):\n mouse_x = int(x / CELL_SIZE)\n mouse_y = int(y / CELL_SIZE)\n\n if event == cv2.EVENT_LBUTTONDOWN:\n CELL_FIELD[mouse_x][mouse_y] = LIVING_CELL\n if event == cv2.EVENT_RBUTTONDOWN:\n CELL_FIELD[mouse_x][mouse_y] = DEAD_CELL\n\n\ndef prepare_pop(cell_field):\n next_gen = [DEAD_CELL] * CELLS_HORIZONTALLY\n for i in range(CELLS_HORIZONTALLY):\n next_gen[i] = [DEAD_CELL] * CELLS_VERTICALLY\n\n for y in range(CELLS_VERTICALLY):\n for x in range(CELLS_HORIZONTALLY):\n pop = 0\n\n try:\n if cell_field[x - 1][y - 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n try:\n if cell_field[x][y - 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n try:\n if cell_field[x + 1][y - 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n\n try:\n if cell_field[x - 1][y] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n try:\n if cell_field[x + 1][y] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n\n try:\n if cell_field[x - 1][y + 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n try:\n if cell_field[x][y + 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n try:\n if cell_field[x + 1][y + 1] == LIVING_CELL:\n pop += 1\n except IndexError:\n pass\n\n if cell_field[x][y] == LIVING_CELL and (pop < 2 or pop > 3):\n next_gen[x][y] = DEAD_CELL\n elif cell_field[x][y] == LIVING_CELL and (pop == 3 or pop == 2):\n next_gen[x][y] = LIVING_CELL\n elif cell_field[x][y] == DEAD_CELL and pop == 3:\n next_gen[x][y] = LIVING_CELL\n\n return next_gen\n\n\ndef draw_pop():\n for y in range(CELLS_VERTICALLY):\n for x in range(CELLS_HORIZONTALLY):\n if CELL_FIELD[x][y] == LIVING_CELL:\n cv2.rectangle(WINDOW, (x*CELL_SIZE, y*CELL_SIZE),\n (x*CELL_SIZE+CELL_SIZE, y*CELL_SIZE+CELL_SIZE), CELL_FILL_COLOR, -1)\n cv2.rectangle(WINDOW, (x*CELL_SIZE, y*CELL_SIZE),\n (x*CELL_SIZE+CELL_SIZE, y*CELL_SIZE+CELL_SIZE), CELL_BORDER_COLOR, 1)\n\n\nWINDOW_NAME = \"OpenCV Game of Life\"\nWINDOW_FILL_COLOR = np.array([0, 0, 0], np.uint8)\nWINDOW_WIDTH = 500\nWINDOW_HEIGHT = 500\n\nVIDEO_NAME = WINDOW_NAME + \".avi\"\nVIDEO_FRAMES_PER_SECOND = 60\nvideo = cv2.VideoWriter(VIDEO_NAME, cv2.VideoWriter_fourcc(\n *\"MJPG\"), VIDEO_FRAMES_PER_SECOND, (WINDOW_WIDTH, WINDOW_HEIGHT))\n\nLIVING_CELL = 1\nDEAD_CELL = 0\n\nCELL_SIZE = 10\nCELLS_HORIZONTALLY = int(WINDOW_WIDTH / CELL_SIZE)\nCELLS_VERTICALLY = int(WINDOW_HEIGHT / CELL_SIZE)\nCELL_FILL_COLOR = (255, 255, 255)\nCELL_BORDER_COLOR = (255, 0, 0)\n\nCELL_FIELD = [DEAD_CELL] * CELLS_HORIZONTALLY\nfor i in range(CELLS_HORIZONTALLY):\n CELL_FIELD[i] = [DEAD_CELL] * CELLS_VERTICALLY\n\ncv2.namedWindow(WINDOW_NAME)\ncv2.setMouseCallback(WINDOW_NAME, mouse_callback)\n\nrun = False\nwhile True:\n k = cv2.waitKey(1) & 0xFF\n\n if k == 13: # Enter KEY\n run = True\n if k == 32: # Space KEY\n run = False\n if k == 115: # s KEY\n CELL_FIELD = prepare_pop(CELL_FIELD)\n if k == 27: # Escape KEY\n video.release()\n break\n\n if run == True:\n CELL_FIELD = prepare_pop(CELL_FIELD)\n\n WINDOW = np.full((WINDOW_HEIGHT, WINDOW_WIDTH, 3), WINDOW_FILL_COLOR)\n draw_pop()\n\n video.write(WINDOW)\n cv2.imshow(WINDOW_NAME, WINDOW)\n"
]
| [
[
"numpy.full",
"numpy.array"
]
]
|
david-joy/bmi203-final | [
"9735a2e030f0a8294c58629b0bee03a7e54257b3"
]
| [
"setup.py"
]
| [
"from distutils.core import setup\nfrom Cython.Build import cythonize\nimport numpy as np\n\nsetup(\n name='BMI203-FinalProject',\n ext_modules=cythonize(\"final_project/_alignment.pyx\"),\n include_dirs=[np.get_include()],\n)\n"
]
| [
[
"numpy.get_include"
]
]
|
XTmeng/Object_detection | [
"0441aee2b0e841eb2e44e60d6ba31146ec832f05"
]
| [
"lib/nets/network.py"
]
| [
"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import losses\nfrom tensorflow.contrib.slim import arg_scope\n\nimport numpy as np\n\nfrom layer_utils.snippets import generate_anchors_pre, generate_anchors_pre_tf\nfrom layer_utils.proposal_layer import proposal_layer, proposal_layer_tf\nfrom layer_utils.proposal_top_layer import proposal_top_layer, proposal_top_layer_tf\nfrom layer_utils.anchor_target_layer import anchor_target_layer\nfrom layer_utils.proposal_target_layer import proposal_target_layer\nfrom utils.visualization import draw_bounding_boxes\n\nfrom model.config import cfg\n\nclass Network(object):\n def __init__(self):\n self._predictions = {}\n self._losses = {}\n self._anchor_targets = {}\n self._proposal_targets = {}\n self._layers = {}\n self._gt_image = None\n self._act_summaries = []\n self._score_sneummaries = {}\n self._train_summaries = []\n self._event_summaries = {}\n self._variables_to_fix = {}\n\n def _add_gt_image(self):\n # add back mean\n image = self._image + cfg.PIXEL_MEANS\n # BGR to RGB (opencv uses BGR)\n resized = tf.image.resize_bilinear(image, tf.to_int32(self._im_info[:2] / self._im_info[2]))\n self._gt_image = tf.reverse(resized, axis=[-1])\n\n def _add_gt_image_summary(self):\n # use a customized visualization function to visualize the boxes\n if self._gt_image is None:\n self._add_gt_image()\n image = tf.py_func(draw_bounding_boxes, \n [self._gt_image, self._gt_boxes, self._im_info],\n tf.float32, name=\"gt_boxes\")\n \n return tf.summary.image('GROUND_TRUTH', image)\n\n def _add_act_summary(self, tensor):\n tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)\n tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',\n tf.nn.zero_fraction(tensor))\n\n def _add_score_summary(self, key, tensor):\n tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)\n\n def _add_train_summary(self, var):\n tf.summary.histogram('TRAIN/' + var.op.name, var)\n\n def _reshape_layer(self, bottom, num_dim, name):\n input_shape = tf.shape(bottom)\n with tf.variable_scope(name) as scope:\n # change the channel to the caffe format\n to_caffe = tf.transpose(bottom, [0, 3, 1, 2])\n # then force it to have channel 2\n reshaped = tf.reshape(to_caffe,\n tf.concat(axis=0, values=[[1, num_dim, -1], [input_shape[2]]]))\n # then swap the channel back\n to_tf = tf.transpose(reshaped, [0, 2, 3, 1])\n return to_tf\n\n def _softmax_layer(self, bottom, name):\n if name.startswith('rpn_cls_prob_reshape'): # startswith判断文本是否以某个字符开始,endswith判断文本是否以某个字符结束\n input_shape = tf.shape(bottom)\n bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]]) # a[-1] last item in the array\n reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)\n return tf.reshape(reshaped_score, input_shape)\n return tf.nn.softmax(bottom, name=name)\n\n def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):\n with tf.variable_scope(name) as scope:\n if cfg.USE_E2E_TF:\n rois, rpn_scores = proposal_top_layer_tf(\n rpn_cls_prob,\n rpn_bbox_pred,\n self._im_info,\n self._feat_stride,\n self._anchors,\n self._num_anchors\n )\n else:\n rois, rpn_scores = 
tf.py_func(proposal_top_layer,\n                                              [rpn_cls_prob, rpn_bbox_pred, self._im_info,\n                                               self._feat_stride, self._anchors, self._num_anchors],\n                                              [tf.float32, tf.float32], name=\"proposal_top\")\n            \n            rois.set_shape([cfg.TEST.RPN_TOP_N, 5])\n            rpn_scores.set_shape([cfg.TEST.RPN_TOP_N, 1])\n\n        return rois, rpn_scores\n\n    def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):\n        with tf.variable_scope(name) as scope:\n            if cfg.USE_E2E_TF:\n                rois, rpn_scores = proposal_layer_tf(\n                    rpn_cls_prob,\n                    rpn_bbox_pred,\n                    self._im_info,\n                    self._mode,\n                    self._feat_stride,\n                    self._anchors,\n                    self._num_anchors\n                )\n            else:\n                rois, rpn_scores = tf.py_func(proposal_layer,\n                                              [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,\n                                               self._feat_stride, self._anchors, self._num_anchors],\n                                              [tf.float32, tf.float32], name=\"proposal\")\n\n            rois.set_shape([None, 5])\n            rpn_scores.set_shape([None, 1])\n\n        return rois, rpn_scores\n\n    # Only use it if you have roi_pooling op written in tf.image\n    def _roi_pool_layer(self, bottom, rois, name):\n        with tf.variable_scope(name) as scope:\n            return tf.image.roi_pooling(bottom, rois,\n                                        pooled_height=cfg.POOLING_SIZE,\n                                        pooled_width=cfg.POOLING_SIZE,\n                                        spatial_scale=1. / 16.)[0]\n\n    def _crop_pool_layer(self, bottom, rois, name):\n        with tf.variable_scope(name) as scope:\n            batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name=\"batch_id\"), [1])\n            # Get the normalized coordinates of bounding boxes; tf.image.crop_and_resize expects box coordinates normalized to [0, 1]\n            bottom_shape = tf.shape(bottom)\n            height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])\n            width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])\n            x1 = tf.slice(rois, [0, 1], [-1, 1], name=\"x1\") / width\n            y1 = tf.slice(rois, [0, 2], [-1, 1], name=\"y1\") / height\n            x2 = tf.slice(rois, [0, 3], [-1, 1], name=\"x2\") / width\n            y2 = tf.slice(rois, [0, 4], [-1, 1], name=\"y2\") / height\n            # Won't be back-propagated to rois anyway, but to save time\n            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))\n            pre_pool_size = cfg.POOLING_SIZE * 2 #pooling_size = 7\n            crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name=\"crops\")\n            # See https://blog.csdn.net/m0_38024332/article/details/81779544 for a detailed walkthrough of this crop-and-resize pooling step\n            return slim.max_pool2d(crops, [2, 2], padding='SAME')\n\n    def _dropout_layer(self, bottom, name, ratio=0.5):\n        return tf.nn.dropout(bottom, ratio, name=name)\n\n    def _anchor_target_layer(self, rpn_cls_score, name):\n        with tf.variable_scope(name) as scope:\n            rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(\n                anchor_target_layer,\n                [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],\n                [tf.float32, tf.float32, tf.float32, tf.float32],\n                name=\"anchor_target\")\n\n            rpn_labels.set_shape([1, 1, None, None])\n            rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4])\n            rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])\n            rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4])\n\n            rpn_labels = tf.to_int32(rpn_labels, name=\"to_int32\")\n            self._anchor_targets['rpn_labels'] = rpn_labels\n            self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets\n            self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights\n            self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights\n\n            self._score_summaries.update(self._anchor_targets)\n\n        return rpn_labels\n\n    def 
_proposal_target_layer(self, rois, roi_scores, name):\n    with tf.variable_scope(name) as scope:\n      rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(\n        proposal_target_layer,\n        [rois, roi_scores, self._gt_boxes, self._num_classes],\n        [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32],\n        name=\"proposal_target\")\n\n      rois.set_shape([cfg.TRAIN.BATCH_SIZE, 5])\n      roi_scores.set_shape([cfg.TRAIN.BATCH_SIZE])\n      labels.set_shape([cfg.TRAIN.BATCH_SIZE, 1])\n      bbox_targets.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])\n      bbox_inside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])\n      bbox_outside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])\n\n      self._proposal_targets['rois'] = rois\n      self._proposal_targets['labels'] = tf.to_int32(labels, name=\"to_int32\")\n      self._proposal_targets['bbox_targets'] = bbox_targets\n      self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights\n      self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights\n\n      self._score_summaries.update(self._proposal_targets)\n\n      return rois, roi_scores\n\n  def _anchor_component(self):\n    with tf.variable_scope('ANCHOR_' + self._tag) as scope:\n      # just to get the shape right\n      height = tf.to_int32(tf.ceil(self._im_info[0] / np.float32(self._feat_stride[0])))\n      width = tf.to_int32(tf.ceil(self._im_info[1] / np.float32(self._feat_stride[0])))\n      if cfg.USE_E2E_TF:\n        anchors, anchor_length = generate_anchors_pre_tf(\n          height,\n          width,\n          self._feat_stride,\n          self._anchor_scales,\n          self._anchor_ratios\n        )\n      else:\n        anchors, anchor_length = tf.py_func(generate_anchors_pre,\n                                            [height, width,\n                                             self._feat_stride, self._anchor_scales, self._anchor_ratios],\n                                            [tf.float32, tf.int32], name=\"generate_anchors\")\n      anchors.set_shape([None, 4])\n      anchor_length.set_shape([])\n      self._anchors = anchors\n      self._anchor_length = anchor_length\n\n  # the RPN network plus the classification/regression head\n  def _build_network(self, is_training=True):\n    # select initializers\n    if cfg.TRAIN.TRUNCATED:\n      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)\n      initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)\n    else:\n      initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)\n      initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)\n\n    net_conv = self._image_to_head(is_training)\n    with tf.variable_scope(self._scope, self._scope):\n      # build the anchors for the image\n      self._anchor_component()\n      # region proposal network\n      rois = self._region_proposal(net_conv, is_training, initializer)\n      # region of interest pooling\n      if cfg.POOLING_MODE == 'crop':\n        pool5 = self._crop_pool_layer(net_conv, rois, \"pool5\")\n      else:\n        raise NotImplementedError\n\n    fc7 = self._head_to_tail(pool5, is_training)\n    with tf.variable_scope(self._scope, self._scope):\n      # region classification\n      cls_prob, bbox_pred = self._region_classification(fc7, is_training, \n                                                        initializer, initializer_bbox)\n\n    self._score_summaries.update(self._predictions)\n\n    return rois, cls_prob, bbox_pred\n\n  def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):\n    sigma_2 = sigma ** 2\n    box_diff = bbox_pred - bbox_targets\n    in_box_diff = bbox_inside_weights * box_diff\n    abs_in_box_diff = tf.abs(in_box_diff)\n    smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))\n    in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) 
* smoothL1_sign \\\n                  + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)\n    out_loss_box = bbox_outside_weights * in_loss_box\n    loss_box = tf.reduce_mean(tf.reduce_sum(\n      out_loss_box,\n      axis=dim\n    ))\n    return loss_box\n\n  def _add_losses(self, sigma_rpn=3.0):\n    with tf.variable_scope('LOSS_' + self._tag) as scope:\n      # both classification losses use softmax_cross_entropy; both box-regression losses use smooth_L1_loss\n      # RPN, class loss\n      rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])\n      rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])\n      rpn_select = tf.where(tf.not_equal(rpn_label, -1))\n      rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])\n      rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])\n      rpn_cross_entropy = tf.reduce_mean(\n        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))\n\n      # RPN, bbox loss\n      rpn_bbox_pred = self._predictions['rpn_bbox_pred']\n      rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']\n      rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']\n      rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']\n      rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n                                          rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])\n\n      # RCNN, class loss\n      cls_score = self._predictions[\"cls_score\"]\n      label = tf.reshape(self._proposal_targets[\"labels\"], [-1])\n      cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))\n\n      # RCNN, bbox loss\n      bbox_pred = self._predictions['bbox_pred']\n      bbox_targets = self._proposal_targets['bbox_targets']\n      bbox_inside_weights = self._proposal_targets['bbox_inside_weights']\n      bbox_outside_weights = self._proposal_targets['bbox_outside_weights']\n      loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)\n\n      self._losses['cross_entropy'] = cross_entropy\n      self._losses['loss_box'] = loss_box\n      self._losses['rpn_cross_entropy'] = rpn_cross_entropy\n      self._losses['rpn_loss_box'] = rpn_loss_box\n\n      loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box\n      regularization_loss = tf.add_n(tf.losses.get_regularization_losses(), 'regu')\n      self._losses['total_loss'] = loss + regularization_loss\n\n      self._event_summaries.update(self._losses)\n\n      return loss\n\n  def _region_proposal(self, net_conv, is_training, initializer):\n    rpn = slim.conv2d(net_conv, cfg.RPN_CHANNELS, [3, 3], trainable=is_training, weights_initializer=initializer,\n                      scope=\"rpn_conv/3x3\")\n    self._act_summaries.append(rpn)\n    rpn_cls_score = slim.conv2d(rpn, self._num_anchors * 2, [1, 1], trainable=is_training,\n                                weights_initializer=initializer,\n                                padding='VALID', activation_fn=None, scope='rpn_cls_score')\n    # change it so that the score has 2 as its channel size\n    rpn_cls_score_reshape = self._reshape_layer(rpn_cls_score, 2, 'rpn_cls_score_reshape')\n    rpn_cls_prob_reshape = self._softmax_layer(rpn_cls_score_reshape, \"rpn_cls_prob_reshape\")\n    rpn_cls_pred = tf.argmax(tf.reshape(rpn_cls_score_reshape, [-1, 2]), axis=1, name=\"rpn_cls_pred\")\n    rpn_cls_prob = self._reshape_layer(rpn_cls_prob_reshape, self._num_anchors * 2, \"rpn_cls_prob\")\n    rpn_bbox_pred = slim.conv2d(rpn, self._num_anchors * 4, [1, 1], trainable=is_training,\n                                weights_initializer=initializer,\n                                padding='VALID', activation_fn=None, scope='rpn_bbox_pred')\n    if is_training:\n      rois, roi_scores = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, 
\"rois\")\n rpn_labels = self._anchor_target_layer(rpn_cls_score, \"anchor\")\n # Try to have a deterministic order for the computing graph, for reproducibility\n with tf.control_dependencies([rpn_labels]): # 只有rpn_labels被执行之后下面的才能执行,数据流控制\n rois, _ = self._proposal_target_layer(rois, roi_scores, \"rpn_rois\")\n else:\n if cfg.TEST.MODE == 'nms':\n rois, _ = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, \"rois\")\n elif cfg.TEST.MODE == 'top':\n rois, _ = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, \"rois\")\n else:\n raise NotImplementedError\n\n self._predictions[\"rpn_cls_score\"] = rpn_cls_score\n self._predictions[\"rpn_cls_score_reshape\"] = rpn_cls_score_reshape\n self._predictions[\"rpn_cls_prob\"] = rpn_cls_prob\n self._predictions[\"rpn_cls_pred\"] = rpn_cls_pred\n self._predictions[\"rpn_bbox_pred\"] = rpn_bbox_pred\n self._predictions[\"rois\"] = rois\n\n return rois\n\n def _region_classification(self, fc7, is_training, initializer, initializer_bbox):\n cls_score = slim.fully_connected(fc7, self._num_classes, \n weights_initializer=initializer,\n trainable=is_training,\n activation_fn=None, scope='cls_score')\n cls_prob = self._softmax_layer(cls_score, \"cls_prob\")\n cls_pred = tf.argmax(cls_score, axis=1, name=\"cls_pred\")\n bbox_pred = slim.fully_connected(fc7, self._num_classes * 4, \n weights_initializer=initializer_bbox,\n trainable=is_training,\n activation_fn=None, scope='bbox_pred')\n\n self._predictions[\"cls_score\"] = cls_score\n self._predictions[\"cls_pred\"] = cls_pred\n self._predictions[\"cls_prob\"] = cls_prob\n self._predictions[\"bbox_pred\"] = bbox_pred\n\n return cls_prob, bbox_pred\n\n def _image_to_head(self, is_training, reuse=None):\n raise NotImplementedError\n\n def _head_to_tail(self, pool5, is_training, reuse=None):\n raise NotImplementedError\n\n def create_architecture(self, mode, num_classes, tag=None,\n anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n self._im_info = tf.placeholder(tf.float32, shape=[3])\n self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])\n self._tag = tag\n\n self._num_classes = num_classes\n self._mode = mode\n self._anchor_scales = anchor_scales\n self._num_scales = len(anchor_scales)\n\n self._anchor_ratios = anchor_ratios\n self._num_ratios = len(anchor_ratios)\n\n self._num_anchors = self._num_scales * self._num_ratios\n\n training = mode == 'TRAIN'\n testing = mode == 'TEST'\n\n assert tag != None\n\n # handle most of the regularizers here\n weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)\n if cfg.TRAIN.BIAS_DECAY:\n biases_regularizer = weights_regularizer\n else:\n biases_regularizer = tf.no_regularizer\n\n # list as many types of layers as possible, even if they are not used now\n with arg_scope([slim.conv2d, slim.conv2d_in_plane, \\\n slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], \n weights_regularizer=weights_regularizer,\n biases_regularizer=biases_regularizer, \n biases_initializer=tf.constant_initializer(0.0)): \n rois, cls_prob, bbox_pred = self._build_network(training)\n\n layers_to_output = {'rois': rois}\n\n for var in tf.trainable_variables():\n self._train_summaries.append(var)\n\n if testing:\n stds = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (self._num_classes))\n means = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (self._num_classes))\n self._predictions[\"bbox_pred\"] *= stds\n self._predictions[\"bbox_pred\"] += means\n else:\n 
self._add_losses()\n layers_to_output.update(self._losses)\n\n val_summaries = []\n with tf.device(\"/cpu:0\"):\n val_summaries.append(self._add_gt_image_summary())\n for key, var in self._event_summaries.items():\n val_summaries.append(tf.summary.scalar(key, var))\n for key, var in self._score_summaries.items():\n self._add_score_summary(key, var)\n for var in self._act_summaries:\n self._add_act_summary(var)\n for var in self._train_summaries:\n self._add_train_summary(var)\n\n self._summary_op = tf.summary.merge_all()\n self._summary_op_val = tf.summary.merge(val_summaries)\n\n layers_to_output.update(self._predictions)\n\n return layers_to_output\n\n def get_variables_to_restore(self, variables, var_keep_dic):\n raise NotImplementedError\n\n def fix_variables(self, sess, pretrained_model):\n raise NotImplementedError\n\n # Extract the head feature maps, for example for vgg16 it is conv5_3\n # only useful during testing mode\n def extract_head(self, sess, image):\n feed_dict = {self._image: image}\n feat = sess.run(self._layers[\"head\"], feed_dict=feed_dict)\n return feat\n\n # only useful during testing mode\n def test_image(self, sess, image, im_info):\n feed_dict = {self._image: image,\n self._im_info: im_info}\n\n cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions[\"cls_score\"],\n self._predictions['cls_prob'],\n self._predictions['bbox_pred'],\n self._predictions['rois']],\n feed_dict=feed_dict)\n return cls_score, cls_prob, bbox_pred, rois\n\n def get_summary(self, sess, blobs):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n summary = sess.run(self._summary_op_val, feed_dict=feed_dict)\n\n return summary\n\n def train_step(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, _ = sess.run([self._losses[\"rpn_cross_entropy\"],\n self._losses['rpn_loss_box'],\n self._losses['cross_entropy'],\n self._losses['loss_box'],\n self._losses['total_loss'],\n train_op],\n feed_dict=feed_dict)\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss\n\n def train_step_with_summary(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary, _ = sess.run([self._losses[\"rpn_cross_entropy\"],\n self._losses['rpn_loss_box'],\n self._losses['cross_entropy'],\n self._losses['loss_box'],\n self._losses['total_loss'],\n self._summary_op,\n train_op],\n feed_dict=feed_dict)\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary\n\n def train_step_no_return(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n sess.run([train_op], feed_dict=feed_dict)\n\n"
]
| [
[
"tensorflow.constant_initializer",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.reshape",
"tensorflow.reverse",
"tensorflow.to_float",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.random_normal_initializer",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.summary.histogram",
"tensorflow.argmax",
"tensorflow.transpose",
"tensorflow.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.abs",
"numpy.array",
"tensorflow.summary.merge",
"tensorflow.summary.scalar",
"tensorflow.py_func",
"tensorflow.image.roi_pooling",
"numpy.float32",
"tensorflow.device",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.contrib.slim.conv2d",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.summary.merge_all",
"tensorflow.to_int32",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.nn.zero_fraction",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.summary.image",
"tensorflow.losses.get_regularization_losses",
"tensorflow.not_equal",
"tensorflow.gather",
"tensorflow.truncated_normal_initializer",
"tensorflow.slice",
"tensorflow.pow"
]
]
|
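The `_smooth_l1_loss` helper in the row above is worth reading in isolation. As a minimal sketch of what it computes, here is the same piecewise loss in plain NumPy, with the inside/outside weight tensors dropped for clarity; the function name and the sample values are illustrative additions, not part of the dataset row:

```python
import numpy as np

def smooth_l1(bbox_pred, bbox_targets, sigma=1.0):
    # quadratic branch below the 1/sigma^2 threshold, linear branch above it
    sigma_2 = sigma ** 2
    diff = bbox_pred - bbox_targets
    abs_diff = np.abs(diff)
    sign = (abs_diff < 1.0 / sigma_2).astype(np.float32)
    per_elem = (diff ** 2) * (sigma_2 / 2.0) * sign + (abs_diff - 0.5 / sigma_2) * (1.0 - sign)
    # sum over box coordinates, mean over the batch, as in the TF version
    return per_elem.sum(axis=1).mean()

pred = np.array([[0.1, 0.2, -0.3, 2.0]], dtype=np.float32)
target = np.zeros_like(pred)
print(smooth_l1(pred, target))  # small residuals hit the quadratic branch, 2.0 the linear one
```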
MarcoMeter/neroRL | [
"f16c100b9029818e4322f7c1c88dbcc5bbade34b"
]
| [
"eval_checkpoints.py"
]
| [
"\"\"\"\nThis eval script evaluates all available models inside one directory.\n\nThe shape of the ouput data is as follow:\n(Checkpoint, Seed, Worker)\n\nEach data point is a dictionary given the episode information:\n - reward\n - length\n - seed\n\nDepending on the environment, more information might be available.\nFor example, Obstacle Tower has a floor key inside that dictionary.\n\"\"\"\nimport torch\nimport os\nimport time\nimport pickle\nimport numpy as np\nfrom docopt import docopt\nfrom gym import spaces\n\nfrom neroRL.utils.yaml_parser import YamlParser\nfrom neroRL.trainers.PPO.evaluator import Evaluator\nfrom neroRL.environments.wrapper import wrap_environment\nfrom neroRL.trainers.PPO.otc_model import OTCModel\nfrom neroRL.utils.serialization import load_checkpoint\n\ndef main():\n # Docopt command line arguments\n _USAGE = \"\"\"\n Usage:\n evaluate.py [options]\n evaluate.py --help\n\n Options:\n --config=<path> Path of the Config file [default: ./configs/default.yaml].\n --worker-id=<n> Sets the port for each environment instance [default: 2].\n --path=<path> Specifies the tag of the tensorboard summaries [default: None].\n --name=<path> Specifies the full path to save the output file [default: results.res].\n \"\"\"\n options = docopt(_USAGE)\n config_path = options[\"--config\"]\n worker_id = int(options[\"--worker-id\"])\n path = options[\"--path\"]\n name = options[\"--name\"]\n\n # Load environment, model, evaluation and training parameters\n configs = YamlParser(config_path).get_config()\n\n # Determine cuda availability\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Create dummy environment to retrieve the shapes of the observation and action space for further processing\n print(\"Step 1: Creating dummy environment of type \" + configs[\"environment\"][\"type\"])\n dummy_env = wrap_environment(configs[\"environment\"], worker_id)\n visual_observation_space = dummy_env.visual_observation_space\n vector_observation_space = dummy_env.vector_observation_space\n if isinstance(dummy_env.action_space, spaces.Discrete):\n action_space_shape = (dummy_env.action_space.n,)\n else:\n action_space_shape = tuple(dummy_env.action_space.nvec)\n dummy_env.close()\n\n # Init evaluator\n print(\"Step 1: Environment Config\")\n for k, v in configs[\"environment\"].items():\n print(\"Step 1: \" + str(k) + \": \" + str(v))\n print(\"Step 2: Evaluation Config\")\n for k, v in configs[\"evaluation\"].items():\n print(\"Step 2: \" + str(k) + \": \" + str(v))\n print(\"Step 2: Init Evaluator\")\n evaluator = Evaluator(configs, worker_id, visual_observation_space, vector_observation_space)\n\n # Init model\n print(\"Step 2: Initialize model\")\n model = OTCModel(configs[\"model\"], visual_observation_space,\n vector_observation_space, action_space_shape,\n configs[\"model\"][\"recurrence\"]).to(device)\n model.eval()\n\n # Load checkpoint paths\n print(\"Step 4: Load Checkpoint Paths\")\n checkpoints = get_sorted_checkpoints(path)\n print(\"Step 3: Number of Loaded Checkpoint Paths: \" + str(len(checkpoints)))\n\n # Evaluate checkpoints\n print(\"Step 5: Start Evaluation . . 
.\")\n print(\"Progress:\")\n results = []\n current_checkpoint = 0\n for checkpoint in checkpoints:\n loaded_checkpoint = load_checkpoint(checkpoint)\n model.load_state_dict(loaded_checkpoint[\"model_state_dict\"])\n if \"recurrence\" in configs[\"model\"]:\n model.set_mean_recurrent_cell_states(loaded_checkpoint[\"hxs\"], loaded_checkpoint[\"cxs\"])\n _, res = evaluator.evaluate(model, device)\n results.append(res)\n current_checkpoint = current_checkpoint + 1\n prog = current_checkpoint / len(checkpoints)\n print(f\"\\r{prog:.2f}\", end='', flush=True)\n evaluator.close()\n\n # Save results to file\n print(\"\")\n print(\"Step 6: Save to File: \" + name)\n results = np.asarray(results).reshape(len(checkpoints), len(configs[\"evaluation\"][\"seeds\"]), configs[\"evaluation\"][\"n_workers\"])\n outfile = open(name, \"wb\")\n pickle.dump(results, outfile)\n outfile.close()\n\ndef get_sorted_checkpoints(dirpath):\n \"\"\"Generates the full file paths to each checkpoint and sorts them alphabetically.\n\n Arguments:\n dirpath {string} -- Path to the directory containing the checkpoints\n\n Returns:\n {list} -- List that containts the full file path to each checkpoint\n \"\"\"\n a = [s for s in os.listdir(dirpath)\n if os.path.isfile(os.path.join(dirpath, s))]\n a.sort(key=lambda s: os.path.getmtime(os.path.join(dirpath, s)))\n for i, f in enumerate(a):\n a[i] = os.path.join(dirpath, f)\n return a\n\nif __name__ == \"__main__\":\n main()\n "
]
| [
[
"torch.cuda.is_available",
"numpy.asarray"
]
]
|
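A hedged sketch of consuming the file this script writes: it assumes a `results.res` produced by the script, in which `results[checkpoint, seed, worker]` is an episode-info dict with at least a `reward` key, as the module docstring states.

```python
import pickle
import numpy as np

with open("results.res", "rb") as f:
    results = pickle.load(f)  # object array of shape (checkpoint, seed, worker)

# pull the reward out of every episode-info dict, then average over seeds and workers
rewards = np.vectorize(lambda episode: episode["reward"])(results)
print("mean reward per checkpoint:", rewards.mean(axis=(1, 2)))
```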
NoListen/RL-forest | [
"6c43d43cc223a8be02256a60c38d72839b9d3fca"
]
| [
"RL_forest/ddpg_plant/multi_ddpg/memory.py"
]
| [
"# https://github.com/openai/baselines/baselines/ddpg/memory.py\n\n# I want to use the memory used in mxdqn.\n\nimport numpy as np\n\n# TODO use the same storage space for both the obs0 and obs1. USE HALF Memory.\n# TODO use a dict to save all observation. (Key to key storation)\n\"\"\" I DON'T NEED TO CHANGE THE API AGAIN\nDICT HAS key:shape --> create the space\nDICT ALSO HAS key:data --> append the data\n\nEngineering Problem\n\"\"\"\n\n\nclass RingBuffer(object):\n def __init__(self, maxlen, shape, dtype=\"float32\"):\n self.maxlen = maxlen\n self.start = 0\n self.length = 0\n self.data = np.zeros((maxlen,) + shape, dtype=dtype)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n if idx < 0 or idx >= self.length:\n raise KeyError()\n return self.data[(self.start + idx) % self.maxlen]\n\n def get_batch(self, idxs):\n return self.data[(self.start + idxs) % self.maxlen]\n\n def append(self, v):\n if self.length < self.maxlen:\n # We have space, simply increase the length.\n self.length += 1\n elif self.length == self.maxlen:\n # No space, \"remove\" the first item.\n self.start = (self.start + 1) % self.maxlen\n else:\n # This should never happen.\n raise RuntimeError()\n self.data[(self.start + self.length - 1) % self.maxlen] = v\n\n\ndef array_min2d(x):\n x = np.array(x)\n if x.ndim >= 2:\n return x\n return x.reshape(-1, 1)\n\n# TODO integrate attacking map into unit_location map\n# TODO reuse the memory.(obs0 obs1 too many redundance)\n\nclass Memory(object):\n def __init__(self, limit, action_shape, observation_shape, unit_location_shape,\n mask_shape):\n self.limit = limit\n self.cls=\"simple\"\n # TODO change unit_location and mask as boolean type\n self.ul0 = RingBuffer(limit, shape=unit_location_shape, dtype=\"uint8\")\n self.ul1 = RingBuffer(limit, shape=unit_location_shape, dtype=\"uint8\")\n self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=\"uint8\")\n # process the observation in different manners.\n self.mask0 = RingBuffer(limit, shape=mask_shape, dtype=\"uint8\")\n self.mask1 = RingBuffer(limit, shape=mask_shape, dtype=\"uint8\")\n self.actions = RingBuffer(limit, shape=action_shape)\n self.rewards = RingBuffer(limit, shape=(1,))\n self.terminals1 = RingBuffer(limit, shape=(1,))\n self.observations1 = RingBuffer(limit, shape=observation_shape, dtype=\"uint8\")\n self.length = 0\n\n\n def sample(self, batch_size):\n # Draw such that we always have a proceeding element.\n batch_idxs = np.random.random_integers(self.nb_entries - 2, size=batch_size)\n\n obs0_batch = self.observations0.get_batch(batch_idxs)\n obs1_batch = self.observations1.get_batch(batch_idxs)\n mask0_batch = self.mask0.get_batch(batch_idxs)\n mask1_batch = self.mask1.get_batch(batch_idxs)\n action_batch = self.actions.get_batch(batch_idxs)\n reward_batch = self.rewards.get_batch(batch_idxs)\n terminal1_batch = self.terminals1.get_batch(batch_idxs)\n ul0_batch = self.ul0.get_batch(batch_idxs)\n ul1_batch = self.ul1.get_batch(batch_idxs)\n\n result = {\n 'ul0': array_min2d(ul0_batch),\n 'ul1': array_min2d(ul1_batch),\n 'obs0': array_min2d(obs0_batch),\n 'obs1': array_min2d(obs1_batch),\n 'mask0': array_min2d(mask0_batch),\n 'mask1': array_min2d(mask1_batch),\n 'rewards': array_min2d(reward_batch),\n 'actions': array_min2d(action_batch),\n 'terminals1': array_min2d(terminal1_batch),\n }\n return result\n\n # when it is changed. 
the training code will need to change as well.\n    def append(self, obs0, mask0, ul0, action, reward, obs1, mask1, ul1, terminal1, training=True):\n        if not training:\n            return\n        self.length = min(self.limit, self.length+1)\n        self.observations0.append(obs0)\n        self.mask0.append(mask0)\n        self.mask1.append(mask1)\n        self.actions.append(action)\n        self.rewards.append(reward)\n        self.observations1.append(obs1)\n        self.terminals1.append(terminal1)\n        self.ul0.append(ul0)\n        self.ul1.append(ul1)\n\n    @property\n    def nb_entries(self):\n        return len(self.observations0)\n\nclass ObservationBuffer(object):\n    def __init__(self, limit, observation_shape, observation_dtype):\n        assert (observation_shape.keys() == observation_dtype.keys())\n        self.d = {}\n        # dict.keys() returns a dict_keys view, not a list, so materialize it\n        self.k_set = list(observation_dtype.keys())\n        for k in observation_dtype.keys():\n            self.d[k] = RingBuffer(limit, shape=observation_shape[k], dtype=observation_dtype[k])\n\n    def get_batch(self, batch_idx):\n        b = {}\n        for k in self.k_set:\n            #b[k] = array_min2d(self.d[k].get_batch(batch_idx))\n            b[k] = self.d[k].get_batch(batch_idx)\n        return b\n\n    def append(self, v):\n        assert(set(list(v.keys())) == set(self.k_set))\n        for k in self.k_set:\n            self.d[k].append(v[k])\n\n\n    def __len__(self):\n        # pass the length to the upper level\n        return len(self.d[self.k_set[0]])\n\n# similar to Memory, but obs is a dictionary stored in an ObservationBuffer\nclass CompoundMemory(object):\n    def __init__(self, limit, action_shape, observation_shape, observation_dtype, reward_shape=(1,)):\n        self.limit = limit\n        assert(isinstance(observation_shape, dict))\n        assert(isinstance(observation_dtype, dict))\n\n        self.observations0 = ObservationBuffer(limit, observation_shape, observation_dtype)\n        self.observations1 = ObservationBuffer(limit, observation_shape, observation_dtype)\n        self.actions = RingBuffer(limit, shape=action_shape)\n        self.rewards = RingBuffer(limit, shape=reward_shape)\n        # can be changed to boolean\n        self.cls = \"compound\"\n        self.terminals1 = RingBuffer(limit, shape=(1,))\n        self.length = 0\n\n    def sample(self, batch_size):\n        # Draw such that we always have a succeeding element.\n        batch_idxs = np.random.random_integers(self.nb_entries - 2, size=batch_size)\n        obs0_batch = self.observations0.get_batch(batch_idxs)\n        obs1_batch = self.observations1.get_batch(batch_idxs)\n        action_batch = self.actions.get_batch(batch_idxs)\n        reward_batch = self.rewards.get_batch(batch_idxs)\n        terminal1_batch = self.terminals1.get_batch(batch_idxs)\n\n        result = {\n            'obs0': obs0_batch,\n            'obs1': obs1_batch,\n            'rewards': array_min2d(reward_batch),\n            'actions': array_min2d(action_batch),\n            'terminals1': array_min2d(terminal1_batch)\n        }\n\n        return result\n\n    # when the stored format is changed, the training code will need to change as well.\n    def append(self, obs0, action, reward, obs1, terminal1, training=True):\n        if not training:\n            return\n        self.length = min(self.limit, self.length+1)\n        self.observations0.append(obs0)\n        self.actions.append(action)\n        self.rewards.append(reward)\n        self.observations1.append(obs1)\n        self.terminals1.append(terminal1)\n\n    @property\n    def nb_entries(self):\n        return len(self.observations0)\n\n"
]
| [
[
"numpy.array",
"numpy.random.random_integers",
"numpy.zeros"
]
]
|
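A minimal usage sketch for the `RingBuffer` above, showing the wrap-around behavior of `append`, `__getitem__`, and the vectorized `get_batch`. The import path is an assumption (it presumes the file above is on the path as `memory.py`), not part of the row:

```python
import numpy as np
from memory import RingBuffer  # assumed import path for the file above

buf = RingBuffer(maxlen=3, shape=(2,))
for v in ([0, 0], [1, 1], [2, 2], [3, 3]):  # the 4th append evicts the oldest item
    buf.append(np.array(v))

print(len(buf))  # 3
print(buf[0])    # [1. 1.] -- [0, 0] was overwritten; indexing is relative to start
print(buf.get_batch(np.array([0, 2])))  # [[1. 1.] [3. 3.]], vectorized indexing
```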
Diwakar98/COL380-A2-Crout-Decomposition-Using-OpenMP-And-MPI | [
"11717f64e041a95f82293f230257298b3a37e7c5"
]
| [
"checker.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nimport numpy as np\n\nwith open(sys.argv[1], \"r\") as f:\n\tA = f.read()\n\nwith open(sys.argv[2], \"r\") as f:\n\tL = f.read()\n\nwith open(sys.argv[3], \"r\") as f:\n\tU = f.read()\n\n\ndef convert2numpy(mat):\n\tmat = mat.strip().split(\"\\n\")\n\tmat = [i.strip().split() for i in mat]\n\tfor x in mat:\n\t\tfor y in x:\n\t\t\ta, b = y.strip().split('.')\n\t\t\tassert len(b) == 12, \"incorrect precision\"\n\n\treturn np.array(mat).astype(np.double)\n\nA, L, U = (convert2numpy(x) for x in (A,L,U))\n\n# forcing the matrices to be triangular\nL = np.tril(L)\nU = np.triu(U)\n\nassert U.shape == L.shape == A.shape, \"invalid shape\"\nprint(\"n was {}\".format(A.shape[0]))\n\nA_dash = np.matmul(L, U)\n\nprint(\"max deviation from true values was {}\".format(abs(A - A_dash).max()))\n\nprint()\n# print()\n\nif np.allclose(A, A_dash, atol = 1e-3) and abs(np.linalg.det(U) - 1) < 1e-3:\n\tprint(\"Valid Crout Decomposition\")\nelse:\n\tprint(\"Invalid Crout Decomposition\")\n\n\n"
]
| [
[
"numpy.array",
"numpy.matmul",
"numpy.linalg.det",
"numpy.triu",
"numpy.allclose",
"numpy.tril"
]
]
|
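A worked 2x2 example of exactly the property `checker.py` verifies: a Crout decomposition `A = L @ U` where `L` is lower triangular and `U` is unit upper triangular, so `det(U) = 1`. The matrices are made up for illustration:

```python
import numpy as np

A = np.array([[4.0, 2.0],
              [6.0, 5.0]])
L = np.array([[4.0, 0.0],   # lower triangular, carries the pivots
              [6.0, 2.0]])
U = np.array([[1.0, 0.5],   # unit upper triangular, so det(U) == 1
              [0.0, 1.0]])

assert np.allclose(L @ U, A, atol=1e-3)
assert abs(np.linalg.det(U) - 1) < 1e-3
print("Valid Crout Decomposition")
```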
KetanRamesh/Reddit-Text-Analyser | [
"d1cea14b64b6679ec658eddadcbb6955ace731d7"
]
| [
"app/keywords/worker_keywords.py"
]
| [
"#! usr/bin/env python3\n\nimport os\nimport json\nimport jsonpickle\nimport pandas as pd\n\nfrom gensim.summarization import keywords\n\nimport pika, redis\n\n## RabbitMQ and Redis connection\nrabbitMQHost = os.getenv(\"RABBITMQ_HOST\") or \"localhost\"\nredisHost = os.getenv(\"REDIS_HOST\") or \"localhost\"\n\nprint(\"Connecting to rabbitmq({}) and redis({})\".format(rabbitMQHost,redisHost))\n\n## Redis tables\ndb_posts = redis.Redis(host=redisHost, db=1)\ndb_keywords = redis.Redis(host=redisHost, db=3)\n\nclass Keywords:\n\n def __init__(self, submissions=None):\n self.submissions = submissions\n self.freq_dict = {}\n \n def extract_keywords(self, text):\n try:\n return keywords(text).split(\"\\n\")\n except:\n print(\"Call to keywords failed\")\n\n def populate_dict(self, keywords_list):\n if keywords_list == None:\n print(\"No keywords.\")\n return\n\n for key in keywords_list:\n self.freq_dict[key] = self.freq_dict.get(key, 0) + 1\n\n def compute_keywords(self):\n for i in range(len(self.submissions)):\n post = self.submissions.loc[i]\n text = str(post['title'] + post['body'])\n keywords_list = self.extract_keywords(text)\n self.populate_dict(keywords_list)\n\n top_keywords = sorted(self.freq_dict.keys(), key=lambda k: self.freq_dict[k], reverse=True)\n \n print(top_keywords)\n \n if len(top_keywords) < 10:\n return top_keywords\n \n return top_keywords[:10]\n\n def worker(self):\n return self.compute_keywords()\n\ndef get_rabbitMQ():\n try:\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=rabbitMQHost))\n channel = connection.channel()\n print(\"Connection to RabbitMQ successful.\")\n except:\n print(\"Error connecting to RabbitMQ.\")\n return\n\n return connection, channel\n\ndef callback(ch, method, properties, body):\n body = jsonpickle.decode(body)\n submissions = list(db_posts.smembers(body['sub_name']))[0]\n submissions = jsonpickle.decode(submissions)\n \n keywords_ob = Keywords(pd.DataFrame(submissions))\n top_keywords = keywords_ob.worker()\n \n db_keywords.sadd(body['sub_name'], jsonpickle.encode(top_keywords))\n\ndef main():\n connection, channel = get_rabbitMQ()\n channel.exchange_declare(exchange='redditHandle', exchange_type='direct')\n result = channel.queue_declare(queue='worker_keywords', durable=True)\n queue_name = result.method.queue\n\n channel.queue_bind(\n exchange='redditHandle', \n queue=queue_name,\n routing_key='worker_keywords'\n )\n\n channel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)\n channel.start_consuming()\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"pandas.DataFrame"
]
]
|
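The worker above wraps `gensim.summarization.keywords` in a try/except because the call can fail on very short or degenerate texts. A standalone sketch of that call, assuming gensim < 4.0 (the `summarization` module was removed in 4.x); the sample text is made up:

```python
from gensim.summarization import keywords  # available in gensim 3.x only

text = ("Reddit posts often discuss machine learning, data pipelines, "
        "and machine learning frameworks for large scale data analysis.")
# keywords() returns a newline-separated string; the worker splits it the same way
print(keywords(text).split("\n"))
```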
ameen-khosrowzadeh/MIAProject | [
"9f52d3c01def3f7c6d7ea8fd091d740fcfa8610f"
]
| [
"bin/oob.py"
]
| [
"\"\"\"A medical image analysis pipeline.\n\nThe pipeline is used for brain tissue segmentation using a decision forest classifier.\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport sys\nimport timeit\nimport warnings\n\nimport SimpleITK as sitk\nimport sklearn.ensemble as sk_ensemble\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.preprocessing import label_binarize\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\nfrom sklearn.metrics import roc_auc_score\n\n\n\nimport numpy as np\nimport pymia.data.conversion as conversion\nimport pymia.evaluation.writer as writer\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nsys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path\n# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)\n# somehow pip install does not keep track of packages\n\nimport mialab.data.structure as structure\nimport mialab.utilities.file_access_utilities as futil\nimport mialab.utilities.pipeline_utilities as putil\nfrom sklearn.metrics import f1_score\n\nLOADING_KEYS = [structure.BrainImageTypes.T1w,\n structure.BrainImageTypes.T2w,\n structure.BrainImageTypes.GroundTruth,\n structure.BrainImageTypes.BrainMask,\n structure.BrainImageTypes.RegistrationTransform] # the list of data we will load\n\n\n\n\n\n\n\ndef main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str):\n \"\"\"Brain tissue segmentation using decision forests.\n\n The main routine executes the medical image analysis pipeline:\n\n - Image loading\n - Registration\n - Pre-processing\n - Feature extraction\n - Decision forest classifier model building\n - Segmentation using the decision forest classifier model on unseen images\n - Post-processing of the segmentation\n - Evaluation of the segmentation\n \"\"\"\n\n # load atlas images\n putil.load_atlas_images(data_atlas_dir)\n\n print('-' * 5, 'Training...')\n\n # crawl the training image directories\n crawler = futil.FileSystemDataCrawler(data_train_dir,\n LOADING_KEYS,\n futil.BrainImageFilePathGenerator(),\n futil.DataDirectoryFilter())\n pre_process_params = {'skullstrip_pre': True,\n 'normalization_pre': True,\n 'registration_pre': True,\n 'coordinates_feature': True,\n 'intensity_feature': True,\n 'gradient_intensity_feature': True}\n\n # load images for training and pre-process\n images = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)\n\n # generate feature matrix and label vector\n data_train = np.concatenate([img.feature_matrix[0] for img in images])\n labels_train = np.concatenate([img.feature_matrix[1] for img in images]).squeeze()\n\n warnings.warn('Random forest parameters not properly set.')\n # visualization(images)\n print(np.shape(images[0].feature_matrix[0]))\n\n error_rate=[]\n for num_estimators in range(1,10):\n forest = sk_ensemble.RandomForestClassifier(max_features=images[0].feature_matrix[0].shape[1],\n n_estimators=num_estimators,\n max_depth=10,oob_score=True)\n\n # start_time = timeit.default_timer()\n forest.fit(data_train, labels_train)\n\n oob_error = 1 - 
forest.oob_score_\n print(forest.oob_score_)\n error_rate+=[oob_error]\n\n\n\n\n plt.plot(range(1,10),error_rate)\n plt.show()\n\n\n #\n # print(' Time elapsed:', timeit.default_timer() - start_time, 's')\n #\n # # create a result directory with timestamp\n # t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n # result_dir = os.path.join(result_dir, t)\n #\n # os.makedirs(result_dir, exist_ok=True)\n #\n # print('-' * 5, 'Testing...')\n #\n # # initialize evaluator\n # evaluator = putil.init_evaluator()\n #\n # # crawl the training image directories\n # crawler = futil.FileSystemDataCrawler(data_test_dir,\n # LOADING_KEYS,\n # futil.BrainImageFilePathGenerator(),\n # futil.DataDirectoryFilter())\n #\n # # load images for testing and pre-process\n # pre_process_params['training'] = False\n # images_test = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)\n #\n # data_test = np.concatenate([img.feature_matrix[0] for img in images_test])\n # labels_test = np.concatenate([img.feature_matrix[1] for img in images_test]).squeeze()\n #\n # random_state = np.random.RandomState(0)\n # # ax = plt.gca()\n # # rfc_disp = plot_roc_curve(forest, data_test, labels_test, ax=ax, alpha=0.8)\n # # svc_disp.plot(ax=ax, alpha=0.8)\n # # disp = plot_confusion_matrix(forest, data_test, labels_test, normalize='true')\n # # plt.show()\n # X= np.concatenate((data_train,data_test))\n # y= np.concatenate((labels_train,labels_test))\n # y = label_binarize(y, classes=[0, 1, 2 , 3, 4 , 5])\n # n_classes = y.shape[1]\n # n_samples, n_features = X.shape\n #\n # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,random_state=0)\n #\n # # classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,\n # # random_state=random_state))\n #\n # classifier = OneVsRestClassifier(sk_ensemble.RandomForestClassifier(max_features=images[0].feature_matrix[0].shape[1],\n # n_estimators=10,\n # max_depth=5))\n #\n # y_score = classifier.fit(X_train, y_train).predict(X_test)\n #\n #\n #\n # # Compute ROC curve and ROC area for each class\n # fpr = dict()\n # tpr = dict()\n # roc_auc = dict()\n # for i in range(n_classes):\n # fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n # roc_auc[i] = auc(fpr[i], tpr[i])\n #\n # # Compute micro-average ROC curve and ROC area\n # fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n # roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n #\n # # First aggregate all false positive rates\n # all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n #\n # # Then interpolate all ROC curves at this points\n # mean_tpr = np.zeros_like(all_fpr)\n # for i in range(n_classes):\n # mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n #\n # # Finally average it and compute AUC\n # mean_tpr /= n_classes\n #\n # fpr[\"macro\"] = all_fpr\n # tpr[\"macro\"] = mean_tpr\n # roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n #\n # plt.figure()\n # lw = 2\n # plt.plot(fpr[2], tpr[2], color='darkorange',\n # lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n # plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic example')\n # plt.legend(loc=\"lower right\")\n # plt.show()\n #\n #\n # # Plot all ROC curves\n # plt.figure()\n # plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n # label='micro-average ROC curve (area 
= {0:0.2f})'\n # ''.format(roc_auc[\"micro\"]),\n # color='deeppink', linestyle=':', linewidth=4)\n #\n # plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n # label='macro-average ROC curve (area = {0:0.2f})'\n # ''.format(roc_auc[\"macro\"]),\n # color='navy', linestyle=':', linewidth=4)\n #\n # colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\n # for i, color in zip(range(n_classes), colors):\n # plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n # label='ROC curve of class {0} (area = {1:0.2f})'\n # ''.format(i, roc_auc[i]))\n #\n # plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Some extension of Receiver operating characteristic to multi-class')\n # plt.legend(loc=\"lower right\")\n # plt.show()\n\n # evaluator.clear()\n\n\nif __name__ == \"__main__\":\n \"\"\"The program's entry point.\"\"\"\n\n script_dir = os.path.dirname(sys.argv[0])\n\n parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')\n\n parser.add_argument(\n '--result_dir',\n type=str,\n default=os.path.normpath(os.path.join(script_dir, './mia-result')),\n help='Directory for results.'\n )\n\n parser.add_argument(\n '--data_atlas_dir',\n type=str,\n default=os.path.normpath(os.path.join(script_dir, '../data/atlas')),\n help='Directory with atlas data.'\n )\n\n parser.add_argument(\n '--data_train_dir',\n type=str,\n default=os.path.normpath(os.path.join(script_dir, '../data/train/')),\n help='Directory with training data.'\n )\n\n parser.add_argument(\n '--data_test_dir',\n type=str,\n default=os.path.normpath(os.path.join(script_dir, '../data/test/')),\n help='Directory with testing data.'\n )\n\n args = parser.parse_args()\n main(args.result_dir, args.data_atlas_dir, args.data_train_dir, args.data_test_dir)\n"
]
| [
[
"numpy.concatenate",
"matplotlib.pyplot.show",
"numpy.shape",
"sklearn.ensemble.RandomForestClassifier"
]
]
|
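The OOB sweep in `main` above needs the MIALab image data to run. As a self-contained sketch of the same idea, the loop below reproduces it on synthetic data; the dataset, the estimator range, and `random_state` are choices made for the example, not taken from the row:

```python
import matplotlib.pyplot as plt
import sklearn.ensemble as sk_ensemble
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=2000, n_features=10, n_classes=3,
                           n_informative=5, random_state=0)
estimator_range = range(10, 110, 10)
error_rate = []
for num_estimators in estimator_range:
    forest = sk_ensemble.RandomForestClassifier(n_estimators=num_estimators,
                                                max_depth=10, oob_score=True,
                                                random_state=0)
    forest.fit(X, y)
    error_rate.append(1 - forest.oob_score_)  # OOB error = 1 - OOB accuracy

plt.plot(list(estimator_range), error_rate)
plt.xlabel("n_estimators")
plt.ylabel("OOB error")
plt.show()
```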
fhalab/SPGen | [
"c16afe0c13501bc1aa2c8cccb10865ae9934aff8"
]
| [
"remote_generation/attention-is-all-you-need-pytorch/transformer/SubLayers.py"
]
| [
"''' Define the sublayers in encoder/decoder layer '''\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\nfrom transformer.Modules import BottleLinear as Linear\r\nfrom transformer.Modules import ScaledDotProductAttention\r\n#from transformer.Modules import BottleLayerNormalization as LayerNormalization\r\nfrom transformer.Modules import LayerNormalization\r\n\r\n__author__ = \"Yu-Hsiang Huang\"\r\n\r\nclass MultiHeadAttention(nn.Module):\r\n ''' Multi-Head Attention module '''\r\n\r\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\r\n super(MultiHeadAttention, self).__init__()\r\n\r\n self.n_head = n_head\r\n self.d_k = d_k\r\n self.d_v = d_v\r\n\r\n self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))\r\n self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))\r\n self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))\r\n\r\n self.attention = ScaledDotProductAttention(d_model)\r\n self.layer_norm = LayerNormalization(d_model)\r\n self.proj = Linear(n_head*d_v, d_model)\r\n\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n init.xavier_normal(self.w_qs)\r\n init.xavier_normal(self.w_ks)\r\n init.xavier_normal(self.w_vs)\r\n\r\n def forward(self, q, k, v, attn_mask=None):\r\n\r\n d_k, d_v = self.d_k, self.d_v\r\n n_head = self.n_head\r\n\r\n residual = q\r\n\r\n mb_size, len_q, d_model = q.size()\r\n mb_size, len_k, d_model = k.size()\r\n mb_size, len_v, d_model = v.size()\r\n\r\n # treat as a (n_head) size batch\r\n q_s = q.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_q) x d_model\r\n k_s = k.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_k) x d_model\r\n v_s = v.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_v) x d_model\r\n\r\n # treat the result as a (n_head * mb_size) size batch\r\n q_s = torch.bmm(q_s, self.w_qs).view(-1, len_q, d_k) # (n_head*mb_size) x len_q x d_k\r\n k_s = torch.bmm(k_s, self.w_ks).view(-1, len_k, d_k) # (n_head*mb_size) x len_k x d_k\r\n v_s = torch.bmm(v_s, self.w_vs).view(-1, len_v, d_v) # (n_head*mb_size) x len_v x d_v\r\n\r\n # perform attention, result size = (n_head * mb_size) x len_q x d_v\r\n outputs, attns = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.repeat(n_head, 1, 1))\r\n\r\n # back to original mb_size batch, result size = mb_size x len_q x (n_head*d_v)\r\n outputs = torch.cat(torch.split(outputs, mb_size, dim=0), dim=-1) \r\n\r\n # project back to residual size\r\n outputs = self.proj(outputs)\r\n outputs = self.dropout(outputs)\r\n\r\n return self.layer_norm(outputs + residual), attns\r\n\r\nclass PositionwiseFeedForward(nn.Module):\r\n ''' A two-feed-forward-layer module '''\r\n\r\n def __init__(self, d_hid, d_inner_hid, dropout=0.1):\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) # position-wise\r\n self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) # position-wise\r\n self.layer_norm = LayerNormalization(d_hid)\r\n self.dropout = nn.Dropout(dropout)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n residual = x\r\n output = self.relu(self.w_1(x.transpose(1, 2)))\r\n output = self.w_2(output).transpose(2, 1)\r\n output = self.dropout(output)\r\n return self.layer_norm(output + residual)\r\n"
]
| [
[
"torch.nn.Dropout",
"torch.nn.Conv1d",
"torch.nn.init.xavier_normal",
"torch.FloatTensor",
"torch.split",
"torch.bmm",
"torch.nn.ReLU"
]
]
|
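A shape sanity-check sketch for the `MultiHeadAttention` module above. It assumes the repo's `transformer` package is importable and that `attn_mask` is a `(mb_size, len_q, len_k)` byte mask, which is what the `attn_mask.repeat(n_head, 1, 1)` call implies (passing the default `None` would fail on that call):

```python
import torch
from torch.autograd import Variable  # this codebase predates the tensor/Variable merge
from transformer.SubLayers import MultiHeadAttention

mha = MultiHeadAttention(n_head=8, d_model=64, d_k=8, d_v=8)
q = Variable(torch.rand(2, 5, 64))   # (mb_size, len_q, d_model), self-attention case
mask = torch.zeros(2, 5, 5).byte()   # all zeros: no positions masked
out, attns = mha(q, q, q, attn_mask=mask)
print(out.size())                    # torch.Size([2, 5, 64]): residual-sized output
```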
ddanielalves/AlphaZero-Checkers | [
"bec24d3bc2011db9159af7355105d6698a30c12c"
]
| [
"PV_NN.py"
]
| [
"import os.path\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense, BatchNormalization, Conv2D, Activation, Dropout, Input, Flatten, Concatenate\nfrom tensorflow.keras.optimizers import Adam\nimport numpy as np\n\nimport config\n\nclass Policy_Value_NN:\n def __init__(self, game, load_best = False) -> None:\n \"\"\"Definition of the layers for the Policy Value NN. \n The network has one input layer and two output layers, one for the value and other for the policy.\n\n Args:\n game (Game): game object to obtain the observation and action space sizes.\n load_best (bool, optional): Flag that defined if the best nn should be loaded from memory. Defaults to False.\n \"\"\"\n\n input_shape = game.OBSERVATION_SPACE_VALUES\n action_size = game.ACTION_SPACE_SIZE\n\n # Neural Net\n input_layer = Input(shape=input_shape)\n\n hidden_layer = Conv2D(512, 2, padding='same')(input_layer)\n hidden_layer = BatchNormalization(axis=3)(hidden_layer)\n hidden_layer = Activation('relu')(hidden_layer)\n \n hidden_layer = Conv2D(256, 3, padding='same')(input_layer)\n hidden_layer = BatchNormalization(axis=3)(hidden_layer)\n hidden_layer = Activation('relu')(hidden_layer)\n\n hidden_layer = Flatten()(hidden_layer) \n \n hidden_layer = Concatenate()([hidden_layer, Flatten()(input_layer)])\n\n hidden_layer = Dropout(config.DROPOUT_RATE)(hidden_layer)\n hidden_layer = Dense(1024)(hidden_layer)\n hidden_layer = BatchNormalization(axis=1)(hidden_layer)\n hidden_layer = Activation('relu')(hidden_layer)\n \n hidden_layer = Dropout(config.DROPOUT_RATE)(hidden_layer)\n hidden_layer = Dense(1024)(hidden_layer)\n hidden_layer = BatchNormalization(axis=1)(hidden_layer)\n hidden_layer = Activation('relu')(hidden_layer)\n\n hidden_layer = Dropout(config.DROPOUT_RATE)(hidden_layer)\n hidden_layer = Dense(1024)(hidden_layer)\n hidden_layer = BatchNormalization(axis=1)(hidden_layer)\n hidden_layer = Activation('relu')(hidden_layer)\n\n policy_head = Dense(action_size, activation='softmax', name='pi')(hidden_layer) # batch_size x self.action_size\n value_head = Dense(1, activation='tanh', name='v')(hidden_layer) # batch_size x 1\n\n self.model = Model(inputs=input_layer, outputs=[policy_head, value_head])\n self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(config.LEARNING_RATE))\n\n if load_best:\n self.load_model(f\"./trained_models/best_{game.nrows}.h5\")\n\n\n def load_model(self, filepath):\n \"\"\"Load a model from the disk\n\n Args:\n filepath (str): filepath to the model to load.\n \"\"\"\n\n if os.path.isfile(filepath):\n self.model = load_model(filepath)\n \n def predict(self, game):\n \"\"\"Predict the policy and value for the current game state\n\n Args:\n game (Game): Game with the board state to predict\n\n Raises:\n Exception: Raise an exception if the network outputs a policy that does not match the valid moves for debug\n\n Returns:\n policy: dict of moves and probabilities\n value: value the nn gives to the state\n \"\"\"\n\n board = game.state\n valid_moves = game.get_valid_actions()\n\n policy, value = self.model(board)\n policy, value = policy[0], value[0].numpy()[0]\n\n pi = np.zeros(len(valid_moves))\n for i, move in enumerate(valid_moves):\n pi[i] = policy[move]\n\n if pi.sum() == 0:\n print(pi, valid_moves, policy, board, game.game_finished())\n raise Exception(\"policy summing to 0\")\n \n pi = pi/pi.sum() \n policy = dict(zip(valid_moves, pi))\n\n return policy, value\n \n def 
save_model(self, filepath):\n \"\"\"Saving the model to disk\n\n Args:\n filepath (str): filepath to place the model\n \"\"\"\n self.model.save(filepath)\n\n def train(self, X, y_policy, y_value):\n self.model.fit(\n X,\n [y_policy, y_value],\n batch_size=config.MINIBATCH_SIZE,\n epochs=config.EPOCHS,\n )\n\n\n "
]
| [
[
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"tensorflow.keras.models.load_model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Concatenate"
]
]
|
SausanCantik/descriptive-stats-phenotype-data | [
"04aab439530013b78cdf8f850d2f1c06fa73cbd7"
]
| [
"03. Null data treatment/column_expansion.py"
]
| [
"'''\r\nA function to add accession_group as a column in phenotype data and\r\nencode the entries\r\n'''\r\n\r\ndef column_expansion (dataframe): \r\n accession_group = string_split(dataframe)\r\n \r\n #encode the entries\r\n from sklearn import preprocessing #input encoder library\r\n \r\n le = preprocessing.LabelEncoder()\r\n accession_group = le.fit(accession_group).transform(accession_group)\r\n \r\n #Add column 'accession_group' to dataframe\r\n dataframe['Accession_group'] = accession_group\r\n \r\n return dataframe\r\n"
]
| [
[
"sklearn.preprocessing.LabelEncoder"
]
]
|
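`string_split` is defined elsewhere in that repository, so here is a standalone demo of just the `LabelEncoder` step used above, with made-up group labels in place of its output:

```python
from sklearn import preprocessing

accession_group = ["wild", "landrace", "wild", "elite"]  # hypothetical groups
le = preprocessing.LabelEncoder()
encoded = le.fit(accession_group).transform(accession_group)
print(encoded)      # [2 1 2 0] -- integer codes assigned in alphabetical label order
print(le.classes_)  # ['elite' 'landrace' 'wild']
```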
hammer-mt/hommmer | [
"a02cb87841395f30911242a019f28f6ac15f27ec"
]
| [
"src/hommmer/metrics/rsquared.py"
]
| [
"from sklearn import metrics\n\ndef rsquared(y_actual, y_pred):\n # r squared\n value = round(metrics.r2_score(y_actual, y_pred), 3)\n passed = \"✔️\" if value > 0.8 else \"❌\"\n return value, passed"
]
| [
[
"sklearn.metrics.r2_score"
]
]
|
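Usage sketch for the metric above; the import path mirrors the row's file path (an assumption about how the package is laid out) and the numbers are arbitrary:

```python
from hommmer.metrics.rsquared import rsquared  # assumed importable from the path above

y_actual = [10, 12, 14, 16, 18]
y_pred = [10.1, 11.8, 14.3, 15.9, 18.2]
value, passed = rsquared(y_actual, y_pred)
print(value, passed)  # R^2 rounded to 3 decimals, plus a pass mark when above 0.8
```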
jychoi-hpc/fluctana | [
"a8950c347d422e471a43034e05214e3fec58d428"
]
| [
"stats.py"
]
| [
"import numpy as np\nfrom scipy import signal\nimport math\nimport itertools\n\nimport pickle \n\nimport matplotlib.pyplot as plt\n\n\ndef skewness(t, x, detrend=1):\n # normalize\n x = x / x[0]\n \n if detrend == 1:\n x = signal.detrend(x, type='linear')\n\n nx = (x - np.mean(x)) / np.std(x - np.mean(x))\n skew = np.mean(nx**3) / np.mean(nx**2)**(3.0/2.0)\n\n return skew\n\n\ndef kurtosis(t, x, detrend=1):\n # normalize\n x = x / x[0]\n\n if detrend == 1:\n x = signal.detrend(x, type='linear')\n\n nx = (x - np.mean(x)) / np.std(x - np.mean(x))\n kurt = np.mean(nx**4) / np.mean(nx**2)**2 - 3\n\n return kurt\n\n\ndef hurst(t, x, bins=30, detrend=1, fitlims=[10,1000], **kwargs):\n # R/S method for fGm\n # (generalized hurst exponent for fBm)\n # axis\n bsize = int(1.0*len(t)/bins)\n ax = np.floor( 10**(np.arange(1.0, np.log10(bsize), 0.01)) )\n\n ers = np.zeros((bins, len(ax)))\n\n for b in range(bins):\n idx1 = b*bsize\n idx2 = idx1 + bsize\n\n sx = x[idx1:idx2]\n\n if detrend == 1:\n sx = signal.detrend(sx, type='linear')\n\n for i in range(len(ax)):\n ls = int( ax[i] ) # length of each sub-region\n ns = int( 1.0*ax[-1]/ls ) # number of sub-region\n\n delta = np.zeros((ls + 1, 1))\n for j in range(ns):\n jdx1 = j*ls\n jdx2 = jdx1 + ls\n\n ssx = sx[jdx1:jdx2]\n\n delta[1:,0] = np.cumsum(ssx) - np.cumsum(np.ones(ls))*sum(ssx)/ls\n\n r = np.max(delta) - np.min(delta)\n s = np.sqrt(np.sum(ssx**2)/ls - (np.sum(ssx)/ls)**2)\n\n ers[b,i] = ers[b,i] + r/s/ns\n\n # time lag axis\n dt = t[1] - t[0]\n tax = ax*dt*1e6 # [us]\n # ERS\n mean_ers = np.mean(ers, 0)\n std_ers = np.std(ers, axis=0)\n\n ptime = tax # time lag [us]\n pdata = mean_ers\n plt.plot(ptime, pdata, '-x')\n fidx = (fitlims[0] <= ptime) * (ptime <= fitlims[1])\n fit = np.polyfit(np.log10(ptime[fidx]), np.log10(pdata[fidx]), 1)\n fit_data = 10**(fit[1])*ptime**(fit[0])\n plt.plot(ptime, fit_data, 'r')\n\n # Hurst exponent\n hurst_exp = fit[0]\n\n return tax, mean_ers, std_ers, hurst_exp, fit_data\n\n\ndef bp_prob(x, d=6, bins=1):\n # BP_probability\n nst = math.factorial(d) # number of possible states\n ax = np.arange(nst) + 1 # state number\n\n bsize = int(1.0*len(x)/bins)\n # print('For an accurate estimation of the probability, bsize {:g} should be considerably larger than nst {:g}'.format(bsize, nst))\n\n # possible orders\n orders = np.empty((0,d))\n for p in itertools.permutations(np.arange(d)):\n orders = np.append(orders,np.atleast_2d(p),axis=0)\n\n # calculate permutation probability\n val = np.zeros((nst, bins))\n\n for b in range(bins):\n idx1 = b*bsize\n idx2 = idx1 + bsize\n\n sx = x[idx1:idx2]\n\n jnum = len(sx) - d + 1\n for j in range(jnum):\n ssx = sx[j:(j+d)]\n\n sso = np.argsort(ssx)\n bingo = np.sum(np.abs(orders - np.tile(sso, (nst, 1))), 1) == 0\n val[bingo, b] = val[bingo, b] + 1.0/jnum\n\n pi = np.mean(val, 1) # bin averaged pi\n pierr = np.std(val, 1)\n\n # sort\n pio = np.argsort(-pi)\n val = pi[pio] # bin averaged sorted pi\n std = pierr[pio]\n\n return ax, val, std\n\n\ndef ns_entropy(pi):\n nst = len(pi)\n pinz = pi[pi != 0] # to avoid blow up in entropy calculation\n spi = np.sum(-pinz * np.log(pinz)) # Shannon entropy\n nsent = spi/np.log(nst) # normalized Shannon entropy\n\n return nsent\n\n\ndef js_complexity(pi):\n # Jensen Shannon complexity with a given probability [Rosso PRL 2007]\n nst = len(pi)\n\n nsent = ns_entropy(pi)\n spi = nsent * np.log(nst) # Shannon entropy\n\n pe = 1.0*np.ones(nst)/nst\n spe = np.sum(-pe * np.log(pe))\n\n pieh = (pi + pe)/2.0\n spieh = np.sum(-pieh * np.log(pieh))\n\n # 
Jensen Shannon complexity\n jscom = -2.0*(spieh - spi/2.0 - spe/2.0)/((nst + 1.0)/nst*np.log(nst+1.0) - 2.0*np.log(2.0*nst) + np.log(nst))*nsent\n\n return jscom\n\n\ndef ch_measure(pi):\n # Jensen Shannon complexity, normalized Shannon entropy measure with a given BP probability [Rosso PRL 2007]\n # chaotic : moderate C and H, above fBm\n # stochastic : low C and high H, below fBm\n\n # normalized Shannon entropy\n nsent = ns_entropy(pi)\n\n # Jensen Shannon complexity\n jscom = js_complexity(pi)\n\n return jscom, nsent\n\n\ndef lmc_complexity(pi, nst):\n pe = np.ones(nst)/nst\n\n pinz = pi[pi != 0] # to avoid blow up in log\n nent = -1.0/np.log(nst)*np.sum(pinz * np.log(pinz))\n\n diseq = np.sum((pi - pe)**2)\n\n clmc = diseq*nent\n\n return clmc, nent\n\n\ndef complexity_limits(d):\n nst = math.factorial(d)\n\n pval = np.arange(1.0/nst,1,0.001)\n Hone = -1.0/np.log(nst)*(pval * np.log(pval) + (1.0-pval)*np.log((1.0-pval)/(nst-1.0)))\n Cone = np.zeros(len(Hone))\n for i in range(len(Hone)):\n pi = np.zeros(nst)\n pi[0] = pval[i]\n pi[1:] = (1.0 - pval[i])/(nst - 1.0)\n Cone[i] = js_complexity(pi)\n # plt.plot(Hone, Cone, 'k')\n\n Htwo = np.array([1])\n Ctwo = np.array([0])\n for n in range(nst-1):\n pmin = np.arange(0.001,1.0/(nst-n),0.001)\n # pmin = np.arange(0.001,0.1,0.001)\n Hext = -1.0/np.log(nst)*(pmin * np.log(pmin) + (1.0-pmin)*np.log((1.0-pmin)/(nst-n-1.0)))\n Cext = np.zeros(len(Hext))\n for i in range(len(Hext)):\n pi = np.zeros(nst)\n pi[0:n] = 0\n pi[n:(n+1)] = pmin[i]\n pi[(n+1):] = (1.0 - pmin[i])/(nst - n - 1.0)\n Cext[i] = js_complexity(pi)\n # plt.plot(Hext, Cext, 'k')\n Htwo = np.concatenate((Htwo, Hext), axis=0)\n Ctwo = np.concatenate((Ctwo, Cext), axis=0)\n idx = np.argsort(Htwo)\n Htwo = Htwo[idx]\n Ctwo = Ctwo[idx]\n\n return Hone, Cone, Htwo, Ctwo\n\n\ndef fmb_fgn_locus(d):\n try:\n with open('../chdata/ch_fbm_fgn_d{:d}.pkl'.format(d), 'rb') as f:\n [c_fbm, h_fbm, c_fgn, h_fgn] = pickle.load(f)\n except:\n pass\n\n return c_fbm, h_fbm, c_fgn, h_fgn\n\n\ndef fisher_measure(pi):\n # fisher information measure\n if ns_entropy(pi) == 0:\n f0 = 1.0\n else:\n f0 = 1.0/2.0\n\n fim = f0*np.sum( ( np.sqrt(pi[1:]) - np.sqrt(pi[:-1]) )**2 )\n\n return fim\n\n\ndef intermittency(t, x, bins=20, overlap=0.2, qstep=0.3, fitlims=[20.0,100.0], verbose=1, **kwargs):\n # intermittency parameter from multi-fractal analysis [Carreras PoP 2000]\n # this ranges from 0 (mono-fractal) to 1\n # add D fitting later\n\n # axis\n qax = np.arange(-2,8,qstep) # order axis\n N = len(x)\n Tmax = int( N/(bins - overlap*(bins - 1.0)) ) # minimum bin -> maximum data length\n Tax = np.floor( 10**(np.arange(1, np.log10(Tmax), 0.1)) ) # sub-data length axis\n nTax = Tax/N # normalized axis\n\n # data dimension\n eTq = np.zeros((len(Tax), len(qax)))\n K = np.zeros(len(qax))\n C = np.zeros(len(qax))\n D = np.zeros(len(qax))\n\n # first axes\n x = signal.detrend(x, type='linear')\n\n if verbose == 1:\n plt.subplots_adjust(hspace = 0.5, wspace = 0.3)\n axes1 = plt.subplot(5,1,1)\n\n plt.plot(t, x)\n\n ndxe = (x - np.mean(x))**2 / np.mean((x - np.mean(x))**2) # Eq.(7)\n\n for t, T in enumerate(Tax): # loop over different length T\n bins = int( N/(T - overlap*(T-1)) ) # number of bins with length T\n\n eT = np.zeros(bins)\n bstep = int(T*(1 - overlap))\n for j in range(bins):\n idx1 = j*bstep\n idx2 = int(idx1 + T)\n\n eT[j] = np.mean(ndxe[idx1:idx2]) # Eq.(9)\n\n # calculate moments\n for k, q in enumerate(qax):\n eTq[t, k] = np.mean(eT**(q)) # Eq.(10)\n\n # second axes\n if verbose == 1: 
plt.subplot(5,1,2)\n # calculate K\n for k, q in enumerate(qax):\n if verbose == 1: plt.plot(nTax, eTq[:,k], 'o')\n\n # fit range\n nT1 = fitlims[0]/N\n nT2 = fitlims[1]/N\n idx = (nT1 < nTax) * (nTax < nT2)\n\n lx = np.log(nTax[idx])\n ly = np.log(eTq[idx,k])\n\n fit = np.polyfit(lx, ly, 1)\n fit_func = np.poly1d(fit)\n K[k] = -fit[0]\n\n fx = np.arange(nTax.min(), nTax.max(), 1.0/N)\n fy = np.exp(fit_func(np.log(fx)))\n if verbose == 1:\n plt.plot(fx, fy)\n\n plt.axvline(x=nT1, color='r')\n plt.axvline(x=nT2, color='r')\n\n if verbose == 1:\n plt.title('Linear fit of loglog plot is -K(q)')\n plt.xlabel('T/N')\n plt.ylabel('eTq moments')\n plt.xscale('log')\n plt.yscale('log')\n\n # third axes\n plt.subplot(5,1,3)\n plt.plot(qax, K, '-o')\n plt.xlabel('q')\n plt.ylabel('K(q)')\n\n # calculate C and D\n for k, q in enumerate(qax):\n if (0.9 <= q) and (q <= 1.1):\n Kgrad = np.gradient(K, qax[1] - qax[0])\n C[k] = Kgrad[k]\n\n intmit = C[k]\n print('C({:g}) intermittency parameter is {:g}'.format(q, intmit))\n else:\n C[k] = K[k] / (q - 1)\n\n D[k] = 1 - C[k]\n\n if verbose == 1:\n # fourth axes\n plt.subplot(5,1,4)\n plt.plot(qax, C, '-o')\n plt.xlabel('q')\n plt.ylabel('C(q)')\n\n # fifth axes\n plt.subplot(5,1,5)\n plt.plot(qax, D, '-o')\n plt.xlabel('q')\n plt.ylabel('D(q)')\n\n plt.show()\n\n return intmit\n"
]
| [
[
"numpy.tile",
"numpy.min",
"numpy.mean",
"numpy.cumsum",
"numpy.gradient",
"numpy.concatenate",
"numpy.max",
"numpy.empty",
"numpy.log",
"numpy.arange",
"numpy.polyfit",
"numpy.sqrt",
"numpy.poly1d",
"numpy.log10",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.subplot",
"numpy.atleast_2d",
"numpy.array",
"matplotlib.pyplot.xscale",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.std",
"numpy.argsort",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axvline",
"numpy.sum",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.ones",
"scipy.signal.detrend",
"matplotlib.pyplot.ylabel"
]
]
|
Natenumber12/LUDO_QLearning | [
"0878b9bce01d0afc5798bdbf96db253302654f33"
]
| [
"LUDOpy-QLearn/test/Mytest.py"
]
| [
"import unittest\nimport sys\nimport torch\nfrom torch import autograd, nn, optim\nimport torch.nn.functional as F\n\nsys.path.append(\"../\")\n\nbatch_size = 5\ninput_size = 3\nhidden_size = 4\nnum_classes = 4\nlearning_rate = 0.001\n\ntorch.manual_seed(123)\ninput = autograd.Variable(torch.rand(batch_size, input_size)) - 0.5\ntarget = autograd.Variable((torch.rand(batch_size) * num_classes).long())\nprint('Target: ', target)\n# print('\\nInput: ', input)\n\nclass Model(nn.Module):\n def __init__(self,input_size, hidden_size, num_classes):\n super().__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, num_classes)\n\n def forward(self, x):\n x = self.h1(x)\n x = torch.tanh(x)\n x = self.h2(x)\n x = F.softmax(x, dim=1)\n return x\n\nmodel = Model(input_size = input_size, hidden_size = hidden_size, num_classes = num_classes)\nopt = optim.Adam(params=model.parameters(), lr=learning_rate)\n\nfor epoch in range(1000):\n out = model(input)\n # print('\\nOut: ', out)\n _, pred = out.max(1)\n # print('\\nPrediction: ', pred)\n\n loss = F.nll_loss(out, target)\n print('\\n Loss: ', loss)\n\n model.zero_grad()\n loss.backward()\n opt.step()\n\nprint('\\nTarget: ', target)\nprint('\\nPrediction: ', pred)\n # model.parameters()\n\n\n\n\n\n# def randwalk():\n# import ludopy\n# import numpy as np\n#\n# g = ludopy.Game()\n# there_is_a_winner = False\n#\n# while not there_is_a_winner:\n# (dice, move_pieces, player_pieces, enemy_pieces, player_is_a_winner,\n# there_is_a_winner), player_i = g.get_observation()\n#\n# if len(move_pieces):\n# piece_to_move = move_pieces[np.random.randint(0, len(move_pieces))]\n# else:\n# piece_to_move = -1\n#\n# _, _, _, _, _, there_is_a_winner = g.answer_observation(piece_to_move)\n#\n# print(\"Saving history to numpy file\")\n# g.save_hist(\"game_history.npy\")\n# print(\"Saving game video\")\n# g.save_hist_video(\"game_video.mp4\")\n#\n# return True\n#\n#\n# class MyTestCase(unittest.TestCase):\n# def test_something(self):\n# self.assertEqual(True, randwalk())\n#\n#\n# if __name__ == '__main__':\n# unittest.main()\n"
]
| [
[
"torch.nn.Linear",
"torch.rand",
"torch.tanh",
"torch.manual_seed",
"torch.nn.functional.softmax",
"torch.nn.functional.nll_loss"
]
]
|
akakakakakaa/pytorchic-bert | [
"055d72adce9a41c322d23145840f31a94d9ffec4"
]
| [
"tifu_prep.py"
]
| [
"import fire\nimport tensorflow as tf\nfrom tensor2tensor.data_generators import text_encoder\nimport json\nimport spacy\nfrom spacy.pipeline import Tagger\nimport csv\nimport operator\nimport pickle\nimport re\n\ndef tifu_prep(tifu_path, output_csv_path):\n nlp = spacy.load('en')\n\n\n datasets = []\n word_vocab = {}\n pos_vocab = {}\n dep_vocab = {}\n pos_dep_word_vocab = {}\n with open(\"../english-words/words.txt\") as f:\n valid_words = set(f.read().split())\n\n for line in tf.gfile.Open(tifu_path, \"rb\"):\n line = text_encoder.to_unicode_utf8(line.strip())\n line_json = json.loads(line)\n if not line_json[\"tldr\"]:\n continue\n\n inputs = line_json[\"selftext_without_tldr\"].lstrip()\n targets = line_json[\"tldr\"].lstrip()\n inputs_token = nlp(inputs)\n targets_token = nlp(targets)\n\n input_tokens = \"\"\n input_pos = \"\"\n input_syntax = \"\"\n for token in inputs_token:\n #while token.head != token:\n token_str = str(token)\n if token.pos_ == \"SPACE\":\n continue\n #if not token_str or ' ' in token_str or '\\n' in token_str or u'\\xa0' in token_str or '\\r' in token_str:\n # print(token.pos_)\n # continue\n\n input_tokens += token_str + \" \"\n input_pos += token.pos_ + \" \"\n input_syntax += token.dep_ + \" \"\n\n if len(input_tokens.split()) != len(input_pos.split()) or len(input_tokens.split()) != len(input_syntax.split()):\n print(token_str, ord(token_str), ord(' '), token.pos_)\n\n if token_str in word_vocab.keys():\n word_vocab[token_str] += 1\n else:\n word_vocab[token_str] = 1\n\n if token.pos_ in pos_vocab.keys():\n pos_vocab[token.pos_] += 1\n else:\n pos_vocab[token.pos_] = 1\n\n if token.dep_ in dep_vocab.keys():\n dep_vocab[token.dep_] += 1\n else:\n dep_vocab[token.dep_] = 1\n\n if token.pos_ in pos_dep_word_vocab.keys():\n if token.dep_ in pos_dep_word_vocab[token.pos_].keys():\n if not token_str in pos_dep_word_vocab[token.pos_][token.dep_]:\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n else:\n pos_dep_word_vocab[token.pos_][token.dep_] = []\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n else:\n pos_dep_word_vocab[token.pos_] = {}\n pos_dep_word_vocab[token.pos_][token.dep_] = []\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n\n target_tokens = \"\"\n target_pos = \"\"\n target_syntax = \"\"\n for token in targets_token:\n #while token.head != token:\n token_str = str(token)\n if token.pos_ == \"SPACE\":\n continue\n #if not token_str or ' ' in token_str or '\\n' in token_str or u'\\xa0' in token_str or '\\r' in token_str:\n # print(token.pos_)\n # continue\n\n target_tokens += token_str + \" \"\n target_pos += token.pos_ + \" \"\n target_syntax += token.dep_ + \" \"\n if len(target_tokens.split()) != len(target_pos.split()) or len(target_tokens.split()) != len(target_syntax.split()):\n print(token_str, ord(token_str), ord(' '), token.pos_)\n\n if token_str in word_vocab.keys():\n word_vocab[token_str] += 1\n else:\n word_vocab[token_str] = 1\n\n if token.pos_ in pos_vocab.keys():\n pos_vocab[token.pos_] += 1\n else:\n pos_vocab[token.pos_] = 1\n\n if token.dep_ in dep_vocab.keys():\n dep_vocab[token.dep_] += 1\n else:\n dep_vocab[token.dep_] = 1\n\n if token.pos_ in pos_dep_word_vocab.keys():\n if token.dep_ in pos_dep_word_vocab[token.pos_].keys():\n if not token_str in pos_dep_word_vocab[token.pos_][token.dep_]:\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n else:\n pos_dep_word_vocab[token.pos_][token.dep_] = []\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n else:\n 
pos_dep_word_vocab[token.pos_] = {}\n pos_dep_word_vocab[token.pos_][token.dep_] = []\n pos_dep_word_vocab[token.pos_][token.dep_].append(token_str)\n\n\n # token = token.head\n datasets.append({\"input_token\": input_tokens.rstrip(),\n \"input_pos\": input_pos.rstrip(),\n \"input_syntax\": input_syntax.rstrip(),\n \"target_token\": target_tokens.rstrip(),\n \"target_pos\": target_pos.rstrip(),\n \"target_syntax\": target_syntax.rstrip()})\n\n print(\"num_data: \" + str(len(datasets)))\n train_len = int(len(datasets) * 0.8)\n test_len = int(len(datasets) * 0.1)\n print(\"train_len: \" + str(train_len))\n print(\"test_len: \" + str(test_len))\n\n keys = datasets[0].keys()\n with open(output_csv_path+\"/train.tsv\", 'w') as f:\n dict_writer = csv.DictWriter(f, keys, delimiter=\"\\t\")\n dict_writer.writeheader()\n dict_writer.writerows(datasets[:train_len])\n\n with open(output_csv_path+\"/dev.tsv\", 'w') as f:\n dict_writer = csv.DictWriter(f, keys, delimiter=\"\\t\")\n dict_writer.writeheader()\n dict_writer.writerows(datasets[train_len:train_len+test_len])\n\n with open(output_csv_path+\"/test.tsv\", 'w') as f:\n dict_writer = csv.DictWriter(f, keys, delimiter=\"\\t\")\n dict_writer.writeheader()\n dict_writer.writerows(datasets[train_len+test_len:])\n\n with open(output_csv_path+\"/pos_dep_word.pkl\", 'wb') as f:\n pickle.dump(pos_dep_word_vocab, f)\n\n # sorted turns the dictionary into a list\n sorted_word_vocab = sorted(word_vocab.items(), key=operator.itemgetter(1), reverse=True)\n with open(output_csv_path+\"/word_vocab.txt\", 'w') as f:\n for key, value in sorted_word_vocab:\n f.write(key + \" \" + str(value) + \"\\n\")\n\n sorted_pos_vocab = sorted(pos_vocab.items(), key=operator.itemgetter(1), reverse=True)\n with open(output_csv_path+\"/pos_vocab.txt\", 'w') as f:\n for key, value in sorted_pos_vocab:\n f.write(key + \" \" + str(value) + \"\\n\")\n\n sorted_dep_vocab = sorted(dep_vocab.items(), key=operator.itemgetter(1), reverse=True)\n with open(output_csv_path+\"/dep_vocab.txt\", 'w') as f:\n for key, value in sorted_dep_vocab:\n f.write(key + \" \" + str(value) + \"\\n\")\n\n print(\"FINISH\")\n\n\ndef main(tifu_path='../dataset/tifu/tifu_all_tokenized_and_filtered.json',\n output_csv_path='../dataset/tifu/bert'):\n tifu_prep(tifu_path, output_csv_path)\n\n\nif __name__ == '__main__':\n fire.Fire(main)"
]
| [
[
"tensorflow.gfile.Open"
]
]
|
hitSongXiao/ripple2vec | [
"90d1887eb1ba693abad93479d558336e69179ec3"
]
| [
"src/algorithms.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom time import time\nfrom collections import deque\nimport numpy as np\nimport math,random,logging\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nimport multiprocessing as mp\nfrom collections import defaultdict\n\nfrom utils import *\n\n\ndef generate_parameters_random_walk(workers):\n\n logging.info('Loading distances_nets from disk...')\n\n sum_weights = {}\n amount_edges = {}\n \n layer = 0\n while(isPickle('distances_nets_weights-layer-'+str(layer))):\n logging.info('Executing layer {}...'.format(layer)) \n weights = restoreVariableFromDisk('distances_nets_weights-layer-'+str(layer))\n \n for k,list_weights in weights.items():\n if(layer not in sum_weights):\n sum_weights[layer] = 0\n if(layer not in amount_edges):\n amount_edges[layer] = 0\n\n for w in list_weights:\n sum_weights[layer] += w\n amount_edges[layer] += 1\n \n logging.info('Layer {} executed.'.format(layer))\n layer += 1\n\n average_weight = {}\n for layer in sum_weights.keys():\n average_weight[layer] = sum_weights[layer] / amount_edges[layer]\n\n logging.info(\"Saving average_weights on disk...\")\n saveVariableOnDisk(average_weight,'average_weight')\n\n amount_neighbours = {}\n\n layer = 0\n while(isPickle('distances_nets_weights-layer-'+str(layer))):\n logging.info('Executing layer {}...'.format(layer)) \n weights = restoreVariableFromDisk('distances_nets_weights-layer-'+str(layer))\n\n amount_neighbours[layer] = {}\n\n for k,list_weights in weights.items():\n cont_neighbours = 0\n for w in list_weights:\n if(w > average_weight[layer]):\n cont_neighbours += 1\n amount_neighbours[layer][k] = cont_neighbours\n\n logging.info('Layer {} executed.'.format(layer))\n layer += 1\n\n logging.info(\"Saving amount_neighbours on disk...\")\n saveVariableOnDisk(amount_neighbours,'amount_neighbours')\n\ndef chooseNeighbor(v,graphs,alias_method_j,alias_method_q,layer):\n v_list = graphs[layer][v]\n\n idx = alias_draw(alias_method_j[layer][v],alias_method_q[layer][v])\n v = v_list[idx]\n return v\n\n\ndef exec_random_walk(graphs,alias_method_j,alias_method_q,v,walk_length,amount_neighbours,node_layer,iniLayerZero):\n original_v = v\n t0 = time()\n if(iniLayerZero):\n initialLayer = 0\n else:\n initialLayer = node_layer[v]-1\n layer = initialLayer\n\n\n path = deque()\n path.append(v)\n\n while len(path) < walk_length:\n r = random.random()\n if(v in graphs[layer]):\n if(r < 0.3):\n v = chooseNeighbor(v,graphs,alias_method_j,alias_method_q,layer)\n path.append(v)\n\n else:\n r = random.random()\n if(v in amount_neighbours[layer]):\n limiar_moveup = prob_moveup(amount_neighbours[layer][v])\n if(r > limiar_moveup):\n if(layer > initialLayer):\n layer = layer - 1 \n else:\n if((layer + 1) in graphs and v in graphs[layer + 1] and v in amount_neighbours[layer+1]):\n layer = layer + 1\n else:\n layer = layer - 1 \n\n t1 = time()\n logging.info('RW - vertex {}. 
Time : {}s'.format(original_v,(t1-t0)))\n\n return path\n\n\ndef exec_ramdom_walks_for_chunck(vertices,graphs,alias_method_j,alias_method_q,walk_length,amount_neighbours,node_layer,iniLayerZero):\n walks = deque()\n for v in vertices:\n walks.append(exec_random_walk(graphs,alias_method_j,alias_method_q,v,walk_length,amount_neighbours,node_layer,iniLayerZero))\n return walks\n\ndef generate_random_walks_large_graphs(num_walks,walk_length,workers,vertices,node_layer,iniLayerZero):\n\n logging.info('Loading distances_nets from disk...')\n\n graphs = restoreVariableFromDisk('distances_nets_graphs')\n alias_method_j = restoreVariableFromDisk('nets_weights_alias_method_j')\n alias_method_q = restoreVariableFromDisk('nets_weights_alias_method_q')\n amount_neighbours = restoreVariableFromDisk('amount_neighbours')\n\n logging.info('Creating RWs...')\n t0 = time()\n \n walks = deque()\n initialLayer = 0\n\n parts = workers\n\n with ProcessPoolExecutor(max_workers=workers) as executor:\n\n for walk_iter in range(num_walks):\n random.shuffle(vertices)\n logging.info(\"Execution iteration {} ...\".format(walk_iter))\n walk = exec_ramdom_walks_for_chunck(vertices,graphs,alias_method_j,alias_method_q,walk_length,amount_neighbours,node_layer,iniLayerZero)\n walks.extend(walk)\n logging.info(\"Iteration {} executed.\".format(walk_iter))\n\n\n\n t1 = time()\n logging.info('RWs created. Time : {}m'.format((t1-t0)/60))\n logging.info(\"Saving Random Walks on disk...\")\n save_random_walks(walks)\n\ndef generate_random_walks(num_walks,walk_length,workers,vertices,node_layer,iniLayerZero):\n\n logging.info('Loading distances_nets on disk...')\n\n graphs = restoreVariableFromDisk('distances_nets_graphs')\n alias_method_j = restoreVariableFromDisk('nets_weights_alias_method_j')\n alias_method_q = restoreVariableFromDisk('nets_weights_alias_method_q')\n amount_neighbours = restoreVariableFromDisk('amount_neighbours')\n\n logging.info('Creating RWs...')\n t0 = time()\n \n walks = deque()\n initialLayer = 0\n\n if(workers > num_walks):\n workers = num_walks\n\n with ProcessPoolExecutor(max_workers=workers) as executor:\n futures = {}\n for walk_iter in range(num_walks):\n random.shuffle(vertices)\n job = executor.submit(exec_ramdom_walks_for_chunck,vertices,graphs,alias_method_j,alias_method_q,walk_length,amount_neighbours,node_layer,iniLayerZero)\n futures[job] = walk_iter\n #part += 1\n logging.info(\"Receiving results...\")\n for job in as_completed(futures):\n walk = job.result()\n r = futures[job]\n logging.info(\"Iteration {} executed.\".format(r))\n walks.extend(walk)\n del futures[job]\n\n\n t1 = time()\n logging.info('RWs created. Time: {}m'.format((t1-t0)/60))\n logging.info(\"Saving Random Walks on disk...\")\n save_random_walks(walks)\n\ndef save_random_walks(walks):\n with open('random_walks.txt', 'w') as file:\n for walk in walks:\n line = ''\n for v in walk:\n line += str(v)+' '\n line += '\\n'\n file.write(line)\n return\n\ndef prob_moveup(amount_neighbours):\n x = math.log(amount_neighbours + math.e)\n p = (x / ( x + 1))\n return p\n\n\n\ndef alias_draw(J, q):\n '''\n Draw sample from a non-uniform discrete distribution using alias sampling.\n '''\n K = len(J)\n\n kk = int(np.floor(np.random.rand()*K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n"
]
| [
[
"numpy.random.rand"
]
]
|
rmcaixeta/variography_with_local_directions | [
"4c850440e09953b2bfb1a990ba87e0eee1051aad"
]
| [
"03_model_vario.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\r\nimport PySimpleGUI as sg\r\nimport os\r\nimport lva_vario_funcs as lva\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\n\r\nsg.theme('DarkGrey3')\r\nsg.SetOptions( button_color=('black','#ff7636') )\r\ndmdir = 'C:'\r\n\r\n\r\ndef draw_figure(canvas, figure, loc=(0, 0)):\r\n\tcanvas.delete('ALL')\r\n\tfigure_canvas_agg = FigureCanvasTkAgg(figure, canvas)\r\n\tfigure_canvas_agg.draw()\r\n\tfigure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)\r\n\treturn figure_canvas_agg\r\n\r\n\r\n\r\nlayout_pars = [\r\n\t[sg.Text(' ')],\r\n\t[sg.Text('Exp. Vario:', size=(10, 1)), sg.InputText(dmdir+'/exp_variography.csv',key='DATA',size=(40, 1)),sg.FileBrowse(initial_folder=dmdir,file_types=((\"CSV Files\", \"*.csv\"),))],\r\n\t[sg.Text(' ')],\r\n\t[sg.Text('Model type:', size=(10, 1)), sg.InputCombo(('Spherical', 'Exponential'), default_value='Spherical',key='MODTYP',readonly=True)],\r\n\t[sg.Text(' ')],\r\n\t[sg.Text('Nugget:', size=(20, 1)), sg.InputText('0.08',key='NGT',size=(10, 1))],\r\n\t[sg.Text('1st structure cc:', size=(20, 1)), sg.InputText('0.54',key='CC1',size=(10, 1))],\r\n\t[sg.Text('1st structure ranges:', size=(20, 1)),sg.InputText('39.9',key='R11',size=(10, 1)),sg.InputText('36.8',key='R12',size=(10, 1)),sg.InputText('0',key='R13',size=(10, 1))],\r\n\t[sg.Text('2nd structure cc:', size=(20, 1)), sg.InputText('0.38',key='CC2',size=(10, 1))],\r\n\t[sg.Text('2nd structure ranges:', size=(20, 1)),sg.InputText('132',key='R21',size=(10, 1)),sg.InputText('36.8',key='R22',size=(10, 1)),sg.InputText('0',key='R23',size=(10, 1))],\r\n\t[sg.Text('3rd structure cc:', size=(20, 1)), sg.InputText('0.0',key='CC3',size=(10, 1))],\r\n\t[sg.Text('3rd structure ranges:', size=(20, 1)),sg.InputText('0',key='R31',size=(10, 1)),sg.InputText('0',key='R32',size=(10, 1)),sg.InputText('0',key='R33',size=(10, 1))],\r\n\t[sg.Text(' ')],\r\n\t[sg.Text('Show pairs histogram:', size=(20, 1)),sg.Checkbox(' ',key='HIST',default=True)],\r\n\t[sg.Text(' ')],\r\n\t[sg.Button('Run'), sg.Button('Close')]\r\n]\r\n\r\n\r\nlayout = [\r\n\t[sg.Column(layout_pars)]\r\n]\r\n\r\nwindow = sg.Window('LVA Vario', layout, finalize=True)\r\n\r\nwhile True: \r\n\tevent, vals = window.Read() \r\n\tif event is None: break\r\n\tif event=='Close':\r\n\t\twindow.Close()\r\n\t\tbreak\r\n\tif event=='Run':\r\n\t\t\r\n\t\tpar={}\r\n\t\tfor x,ax in enumerate(['MAX','MED','MIN']):\r\n\t\t\tpar[ax] = {}\r\n\t\t\tpar[ax]['a1'] = float(vals['R1'+str(x+1)])\r\n\t\t\tpar[ax]['a2'] = float(vals['R2'+str(x+1)])\r\n\t\t\tpar[ax]['a3'] = float(vals['R3'+str(x+1)])\r\n\r\n\t\twindow.Close()\r\n\t\tfig_parts = lva.model_vario(vals['DATA'],vals['MODTYP'],float(vals['NGT']),float(vals['CC1']),float(vals['CC2']),float(vals['CC3']),vals['HIST'],par)\r\n\t\t\t\t\r\n\t\tlayout_pars = [\r\n\t\t\t[sg.Text(' ')],\r\n\t\t\t[sg.Text('Exp. 
Vario:', size=(10, 1)), sg.InputText(vals['DATA'],key='DATA',size=(40, 2)),sg.FileBrowse(initial_folder=dmdir,file_types=((\"CSV Files\", \"*.csv\"),))],\r\n\t\t\t[sg.Text(' ')],\r\n\t\t\t[sg.Text('Model type:', size=(10, 1)), sg.InputCombo(('Spherical', 'Exponential'), default_value=vals['MODTYP'],key='MODTYP',readonly=True)],\r\n\t\t\t[sg.Text(' ')],\r\n\t\t\t[sg.Text('Nugget:', size=(20, 1)), sg.InputText(vals['NGT'],key='NGT',size=(10, 1))],\r\n\t\t\t[sg.Text('1st structure cc:', size=(20, 1)), sg.InputText(vals['CC1'],key='CC1',size=(10, 1))],\r\n\t\t\t[sg.Text('1st structure ranges:', size=(20, 1)),sg.InputText(vals['R11'],key='R11',size=(10, 1)),sg.InputText(vals['R12'],key='R12',size=(10, 1)),sg.InputText(vals['R13'],key='R13',size=(10, 1))],\r\n\t\t\t[sg.Text('2nd structure cc:', size=(20, 1)), sg.InputText(vals['CC2'],key='CC2',size=(10, 1))],\r\n\t\t\t[sg.Text('2nd structure ranges:', size=(20, 1)),sg.InputText(vals['R21'],key='R21',size=(10, 1)),sg.InputText(vals['R22'],key='R22',size=(10, 1)),sg.InputText(vals['R23'],key='R23',size=(10, 1))],\r\n\t\t\t[sg.Text('3rd structure cc:', size=(20, 1)), sg.InputText(vals['CC3'],key='CC3',size=(10, 1))],\r\n\t\t\t[sg.Text('3rd structure ranges:', size=(20, 1)),sg.InputText(vals['R31'],key='R31',size=(10, 1)),sg.InputText(vals['R32'],key='R32',size=(10, 1)),sg.InputText(vals['R33'],key='R33',size=(10, 1))],\r\n\t\t\t[sg.Text(' ')],\r\n\t\t\t[sg.Text('Show pairs histogram:', size=(20, 1)),sg.Checkbox(' ',key='HIST',default=vals['HIST'])],\r\n\t\t\t[sg.Text(' ')],\r\n\t\t\t[sg.Button('Run'), sg.Button('Close')]\r\n\t\t]\r\n\t\t\r\n\t\tout_layout = [[sg.Canvas(size=(fig_parts[1], fig_parts[2]),key='canvas')]]\r\n\t\tlayout = [[sg.Column(layout_pars),sg.Column(out_layout)]]\r\n\t\t\r\n\t\twindow = sg.Window('LVA Vario', layout, finalize=True)\r\n\t\t#window = sg.Window('Model', out_layout, finalize=True)\r\n\t\tfig_canvas_agg = draw_figure(window['canvas'].TKCanvas, fig_parts[0])\r\n\t\twindow.UnHide()\r\n\t\t\r\n\t\t\r\n"
]
| [
[
"matplotlib.use",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
]
|
dongdongdong1217/Detectron2-FC | [
"92356ebbf52b4e39c94537af26abcf46419c8c2f"
]
| [
"demo/demo.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport argparse\nimport glob\nimport multiprocessing as mp\nimport numpy as np\nimport os\nimport tempfile\nimport time\nimport warnings\nimport cv2\nimport tqdm\n\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.utils.logger import setup_logger\n\nfrom predictor import VisualizationDemo\n\n# constants\nWINDOW_NAME = \"COCO detections\"\n\n\ndef setup_cfg(args):\n # load config from file and command-line arguments\n cfg = get_cfg()\n # To use demo for Panoptic-DeepLab, please uncomment the following two lines.\n # from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa\n # add_panoptic_deeplab_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # Set score_threshold for builtin models\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold\n cfg.freeze()\n return cfg\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Detectron2 demo for builtin configs\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--webcam\", action=\"store_true\", help=\"Take inputs from webcam.\")\n parser.add_argument(\"--video-input\", help=\"Path to video file.\")\n parser.add_argument(\n \"--input\",\n nargs=\"+\",\n default=[\"demo/images/000023.jpg\"],\n help=\"A list of space separated input images; \"\n \"or a single glob pattern such as 'directory/*.jpg'\",\n )\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. 
\"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.5,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify config options using the command-line 'KEY VALUE' pairs\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef test_opencv_video_format(codec, file_ext):\n with tempfile.TemporaryDirectory(prefix=\"video_format_test\") as dir:\n filename = os.path.join(dir, \"test_file\" + file_ext)\n writer = cv2.VideoWriter(\n filename=filename,\n fourcc=cv2.VideoWriter_fourcc(*codec),\n fps=float(30),\n frameSize=(10, 10),\n isColor=True,\n )\n [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]\n writer.release()\n if os.path.isfile(filename):\n return True\n return False\n\n\nif __name__ == \"__main__\":\n mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n setup_logger(name=\"fvcore\")\n logger = setup_logger()\n logger.info(\"Arguments: \" + str(args))\n\n cfg = setup_cfg(args)\n\n demo = VisualizationDemo(cfg)\n\n if args.input:\n if len(args.input) == 1:\n args.input = glob.glob(os.path.expanduser(args.input[0]))\n assert args.input, \"The input path(s) was not found\"\n for path in tqdm.tqdm(args.input, disable=not args.output):\n # use PIL, to be consistent with evaluation\n img = read_image(path, format=\"BGR\")\n start_time = time.time()\n predictions, visualized_output = demo.run_on_image(img)\n logger.info(\n \"{}: {} in {:.2f}s\".format(\n path,\n \"detected {} instances\".format(len(predictions[\"instances\"]))\n if \"instances\" in predictions\n else \"finished\",\n time.time() - start_time,\n )\n )\n\n if args.output:\n if os.path.isdir(args.output):\n assert os.path.isdir(args.output), args.output\n out_filename = os.path.join(args.output, os.path.basename(path))\n else:\n assert len(args.input) == 1, \"Please specify a directory with args.output\"\n out_filename = args.output\n visualized_output.save(out_filename)\n else:\n cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\n cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])\n if cv2.waitKey(0) == 27:\n break # esc to quit\n elif args.webcam:\n assert args.input is None, \"Cannot have both --input and --webcam!\"\n assert args.output is None, \"output not yet supported with --webcam!\"\n cam = cv2.VideoCapture(0)\n for vis in tqdm.tqdm(demo.run_on_video(cam)):\n cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\n cv2.imshow(WINDOW_NAME, vis)\n if cv2.waitKey(1) == 27:\n break # esc to quit\n cam.release()\n cv2.destroyAllWindows()\n elif args.video_input:\n video = cv2.VideoCapture(args.video_input)\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frames_per_second = video.get(cv2.CAP_PROP_FPS)\n num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n basename = os.path.basename(args.video_input)\n codec, file_ext = (\n (\"x264\", \".mkv\") if test_opencv_video_format(\"x264\", \".mkv\") else (\"mp4v\", \".mp4\")\n )\n if codec == \".mp4v\":\n warnings.warn(\"x264 codec not available, switching to mp4v\")\n if args.output:\n if os.path.isdir(args.output):\n output_fname = os.path.join(args.output, basename)\n output_fname = os.path.splitext(output_fname)[0] + file_ext\n else:\n output_fname = args.output\n assert not os.path.isfile(output_fname), output_fname\n output_file = cv2.VideoWriter(\n filename=output_fname,\n # some installation of 
opencv may not support x264 (due to its license),\n # you can try other format (e.g. MPEG)\n fourcc=cv2.VideoWriter_fourcc(*codec),\n fps=float(frames_per_second),\n frameSize=(width, height),\n isColor=True,\n )\n assert os.path.isfile(args.video_input)\n for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):\n if args.output:\n output_file.write(vis_frame)\n else:\n cv2.namedWindow(basename, cv2.WINDOW_NORMAL)\n cv2.imshow(basename, vis_frame)\n if cv2.waitKey(1) == 27:\n break # esc to quit\n video.release()\n if args.output:\n output_file.release()\n else:\n cv2.destroyAllWindows()\n"
]
| [
[
"numpy.zeros"
]
]
|
sfu-arch/TensorBricks | [
"c46c60d0939b7deb65f103bf34961d47419ce571"
]
| [
"TB-scheduler/deprecated/print_attr_dicts_from_constraints.py"
]
| [
"import pandas as pd\nfrom load_model import Net\nimport datetime\nimport argparse\nimport os\n\n# def select_schedule(_net, _model_name, _result_dir, _verbose, val, _hardware_dict):\n# if val == 0:\n# schedule = HWCFSchedule(_net, _model_name, _result_dir, _verbose, hardware_dict=_hardware_dict)\n# elif val == 1:\n# schedule = HWCFScheduleDWPW(_net, _model_name, _result_dir, _verbose, hardware_dict=_hardware_dict)\n# elif val == 2:\n# schedule = HWCFSchedulePWDW(_net, _model_name, _result_dir, _verbose, hardware_dict=_hardware_dict)\n# elif val == 3:\n# schedule = HWCFSchedulePDP(_net, _model_name, _result_dir, _verbose, hardware_dict=_hardware_dict)\n# else:\n# raise Exception('wrong val')\n#\n# schedule.run_model()\n# schedule.print_stats()\n# return\n\n\nif __name__ == '__main__':\n print(datetime.datetime.now())\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=int, required=True,\n help=\"0=HWCFSchedule, 1=HWCFScheduleDWPW, 2=HWCFSchedulePWDW, 3=HWCFSchedulePDP\")\n parser.add_argument('-m', type=int, required=True,\n help=\"total macs = 100, 500, 1000, 3000\")\n\n args = parser.parse_args()\n schedule_value = args.s\n total_mac = args.m\n total_padds = total_mac\n step_size = 10\n\n model_names = ['pdp', 'efficientnet-b3', 'xception', 'densenet161',\n 'inception_v3', 'resnet152', 'efficientnet-b0', 'resnet50',\n 'mobilenet_v2', 'nasnetamobile', 'mnasnet1_0', 'vgg16',\n 'mobilenet', 'resnet18', 'shufflenet_v2_x1_0',\n 'squeezenet1_0', 'alexnet']\n\n data_folder = './raw_data/benchmarks/'\n hardware_yaml = None\n\n verbose = False\n for model_name in model_names[8:9]:\n df = pd.read_csv(data_folder + model_name + '.csv')\n result_dir = './gen_data/benchmarks_results/' + model_name + '/' + str(total_mac) + '/'\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n net = Net(df)\n\n constraint_files = ['per_layer_constraint_' + str(total_mac)+'.csv',\n 'dp_constraint_' + str(total_mac)+'.csv', 'pdp_constraint' + str(total_mac)+'.csv']\n per_layer_param_list = ['wxx', 'cxx', 'fx', 'cx', 'wx']\n dp_param_list = ['wx', 'cxx2', 'fx2', 'cx']\n pdp_param_list = ['wxx', 'fx', 'cxx', 'fx2']\n\n if schedule_value == 0:\n file = 'per_layer_constraint_' + str(total_mac) + '.csv'\n params = per_layer_param_list\n elif schedule_value == 1:\n file = 'dp_constraint_' + str(total_mac) + '.csv'\n params = dp_param_list\n elif schedule_value == 3:\n file = 'pdp_constraint_' + str(total_mac) + '.csv'\n params = pdp_param_list\n else:\n raise Exception('Value unsupported')\n\n data = pd.read_csv(\"get_plots/get_constraints/results_selected/\" + file)\n # new_df = select_pdp_constraints(data)\n # new_df = select_min_max_constraints(data, params)\n new_df = data\n hw_dict = AttrDict()\n hw_dict_list = []\n\n for index, row in new_df.iterrows():\n # print(index, row)\n if file == 'per_layer_constraint_' + str(total_mac) + '.csv':\n hw_dict_list = get_per_layer_params(row['wxx'], row['cxx'], row['fx'], row['cx'], row['wx'],total_padds)\n schedule_value = 0\n for hw_dict in hw_dict_list:\n print(hw_dict)\n elif file == 'dp_constraint_' + str(total_mac) + '.csv':\n hw_dict_list = get_dp_params(row['wx'], row['cxx2'], row['fx2'], row['cx'], total_padds)\n for hw_dict in hw_dict_list:\n print(hw_dict)\n schedule_value = 1\n elif file == 'pdp_constraint_' + str(total_mac) + '.csv':\n hw_dict_list = get_pdp_params(row['wxx'], row['fx'], row['cxx'], row['fx2'], total_padds, step_size)\n schedule_value = 3\n for hw_dict in hw_dict_list:\n print(hw_dict)\n else:\n raise 
Exception('constraint file not known')\n\n # for hw_dict in hw_dict_list:\n # select_schedule(net, model_name, result_dir, verbose, schedule_value, hw_dict)\n\n print(datetime.datetime.now())\n\n"
]
| [
[
"pandas.read_csv"
]
]
|
YiqinXiong/HUST-MachineLearning-Lab | [
"1e920e59b58a721f263e0a87a7f98ff2dda3f7c4"
]
| [
"3-final/voice_gender/main.py"
]
| [
"# -*- coding: utf-8 -*-\n# Author: 熊逸钦\n# Time: 2020/7/25 17:04\n\n\nimport math\n\nimport pandas as pd\nfrom scipy.stats import norm\n\n\n# 拆分训练集和测试集(7:3)\ndef from_csv_to_data_frame(csv_file):\n df = pd.read_csv(csv_file)\n # 分男女,对每一列求非空值的平均\n df_male = df[df['label'] == 'male'].iloc[:, 0:20]\n df_female = df[df['label'] == 'female'].iloc[:, 0:20]\n df_male_valid_mean = df_male[df_male != 0].mean()\n df_female_valid_mean = df_female[df_female != 0].mean()\n # 分性别,按照平均值填补空缺值\n for i in df_male.columns:\n df.loc[(df['label'] == 'male') & (df[i] == 0), i] = df_male_valid_mean[i]\n df.loc[(df['label'] == 'female') & (df[i] == 0), i] = df_female_valid_mean[i]\n # 打乱顺序后的data frame,先混洗,再随机抽取70%作为训练集,再将剩余的30%作为测试集\n df = df.sample(frac=1.0)\n df_train = df.sample(frac=0.7)\n df_test = df[~df.index.isin(df_train.index)]\n return df_train, df_test\n\n\n# 获取属性列的平均值μ和标准差σ\ndef get_mean_std(df_train):\n # 计算每一个属性列的平均值和标准差\n df_train_male = df_train[df_train['label'] == 'male']\n df_train_female = df_train[df_train['label'] == 'female']\n mean_train_male = df_train_male.iloc[:, 0:20].mean().tolist()\n std_train_male = df_train_male.iloc[:, 0:20].std().tolist()\n mean_train_female = df_train_female.iloc[:, 0:20].mean().tolist()\n std_train_female = df_train_female.iloc[:, 0:20].std().tolist()\n return mean_train_male, std_train_male, mean_train_female, std_train_female\n\n\n# 计算各个属性值的相对偏差,以相对偏差代表权重\ndef get_weight(mean_1, mean_2):\n weight = []\n # 计算各个属性值的相对偏差\n for i in range(len(mean_1)):\n a_male = mean_1[i]\n a_female = mean_2[i]\n weight.append(100 * abs(a_male - a_female) / ((a_male + a_female) / 2))\n # 将相对偏差规范化处理\n sum_weight = sum(weight)\n for i in range(len(weight)):\n weight[i] = (20 * weight[i]) / sum_weight\n return weight\n\n\n# 朴素贝叶斯分类器\ndef naive_bayes_classifier(row_id, is_log=True, is_weight=True):\n if is_log:\n p_male_cond = math.log(p_male)\n p_female_cond = math.log(p_female)\n else:\n p_male_cond = p_male\n p_female_cond = p_female\n # 遍历每一属性列\n for i in range(test_df.shape[1] - 1):\n # 带权重时取属性权重,否则取1.0\n weight = attr_weight[i] if is_weight else 1.0\n # 用高斯分布函数计算条件概率\n g_male = norm.cdf(test_df.iloc[row_id, i], mean_male[i], std_male[i])\n g_female = norm.cdf(test_df.iloc[row_id, i], mean_female[i], std_female[i])\n # 取对数时计算条件概率的对数累加之和,否则计算条件概率累乘之积\n if is_log:\n p_male_cond += weight * math.log(g_male)\n p_female_cond += weight * math.log(g_female)\n else:\n p_male_cond *= pow(g_male, weight)\n p_female_cond *= pow(g_female, weight)\n return 'male' if p_male_cond > p_female_cond else 'female'\n\n\nif __name__ == '__main__':\n # 从csv文件读取出训练集和测试集的data_frame\n train_df, test_df = from_csv_to_data_frame(\"voice.csv\")\n # 得到P(男)和P(女)的先验概率\n p_male = len(train_df[train_df['label'] == 'male']) / len(train_df)\n p_female = 1 - p_male\n # 得到训练集数据中各列平均值和标准差,用于计算高斯分布概率\n mean_male, std_male, mean_female, std_female = get_mean_std(train_df)\n # 得到各个属性列的权重\n attr_weight = get_weight(mean_male, mean_female)\n # 最终输出的一些统计量\n male_all = len(test_df[test_df['label'] == 'male'])\n female_all = len(test_df[test_df['label'] == 'female'])\n male_hit = 0\n female_hit = 0\n # 对测试集中的每个成员,比较P(男|条件)和P(女|条件)的大小,取对数累加\n for row in range(len(test_df)):\n # # debug用\n # collect_male = []\n # collect_female = []\n\n # 使用贝叶斯分类器对样本进行分类\n result = naive_bayes_classifier(row, True, True)\n # 判断分类结果是否正确\n if test_df.iloc[row, test_df.shape[1] - 1] == result:\n if result == 'male':\n male_hit += 1\n else:\n female_hit += 1\n # print(\"[Y]\", p_male_cond, p_female_cond, result, test_label[row][0])\n # else:\n # 
print(\"[N]\", p_male_cond, p_female_cond, result, test_label[row][0])\n # print(collect_male)\n # print(collect_female)\n print(\n \"男性测试数:%5d\\t正确数:%5d\\t正确率:%.4f\" % (\n male_all,\n male_hit,\n male_hit / male_all))\n print(\n \"女性测试数:%5d\\t正确数:%5d\\t正确率:%.4f\" % (\n female_all,\n female_hit,\n female_hit / female_all))\n print(\n \"测试集大小:%5d\\t正确数:%5d\\t正确率:%.4f\" % (\n male_all + female_all,\n male_hit + female_hit,\n (male_hit + female_hit) / (male_all + female_all)))\n"
]
| [
[
"pandas.read_csv",
"scipy.stats.norm.cdf"
]
]
|
peteroconnor-bc/artemis | [
"ad2871fae7d986bf10580eec27aee5b7315adad5"
]
| [
"artemis/general/pareto_efficiency.py"
]
| [
"from artemis.general.should_be_builtins import all_equal\n\n__author__ = 'peter'\nimport numpy as np\n\n\n# Very slow for many datapoints. Fastest for many costs, most readable\ndef is_pareto_efficient_dumb(costs):\n \"\"\"\n Find the pareto-efficient points\n :param costs: An (n_points, n_costs) array\n :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient\n \"\"\"\n is_efficient = np.ones(costs.shape[0], dtype = bool)\n for i, c in enumerate(costs):\n is_efficient[i] = np.all(np.any(costs[:i]>c, axis=1)) and np.all(np.any(costs[i+1:]>c, axis=1))\n return is_efficient\n\n\n# Fairly fast for many datapoints, less fast for many costs, somewhat readable\ndef is_pareto_efficient_simple(costs):\n \"\"\"\n Find the pareto-efficient points\n :param costs: An (n_points, n_costs) array\n :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient\n \"\"\"\n is_efficient = np.ones(costs.shape[0], dtype = bool)\n for i, c in enumerate(costs):\n if is_efficient[i]:\n is_efficient[is_efficient] = np.any(costs[is_efficient]<c, axis=1) # Keep any point with a lower cost\n is_efficient[i] = True # And keep self\n return is_efficient\n\n\n# Faster than is_pareto_efficient_simple, but less readable.\ndef is_pareto_efficient(costs, return_mask = True):\n \"\"\"\n Find the pareto-efficient points\n :param costs: An (n_points, n_costs) array\n :param return_mask: True to return a mask\n :return: An array of indices of pareto-efficient points.\n If return_mask is True, this will be an (n_points, ) boolean array\n Otherwise it will be a (n_efficient_points, ) integer array of indices.\n \"\"\"\n is_efficient = np.arange(costs.shape[0])\n n_points = costs.shape[0]\n next_point_index = 0 # Next index in the is_efficient array to search for\n while next_point_index<len(costs):\n nondominated_point_mask = np.any(costs<costs[next_point_index], axis=1)\n nondominated_point_mask[next_point_index] = True\n is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points\n costs = costs[nondominated_point_mask]\n next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1\n if return_mask:\n is_efficient_mask = np.zeros(n_points, dtype = bool)\n is_efficient_mask[is_efficient] = True\n return is_efficient_mask\n else:\n return is_efficient\n\n\ndef is_pareto_efficient_reordered(costs):\n ixs = np.argsort(((costs-costs.mean(axis=0))/(costs.std(axis=0)+1e-7)).sum(axis=1))\n costs = costs[ixs]\n is_efficient = is_pareto_efficient_simple(costs)\n is_efficient[ixs] = is_efficient.copy()\n return is_efficient\n\n\ndef is_pareto_efficient_indexed_reordered(costs, return_mask=True):\n ixs = np.argsort(((costs-costs.mean(axis=0))/(costs.std(axis=0)+1e-7)).sum(axis=1))\n costs = costs[ixs]\n is_efficient = is_pareto_efficient(costs, return_mask=return_mask)\n is_efficient[ixs] = is_efficient.copy()\n return is_efficient\n\n\ndef find_pareto_ixs(cost_arrays):\n \"\"\"\n :param cost_arrays: A collection of nd-arrays representing a grid of costs for different indices.\n :return: A tuple of indices which can be used to index the pareto-efficient points.\n \"\"\"\n assert all_equal([c.shape for c in cost_arrays])\n flat_ixs, = np.nonzero(is_pareto_efficient_simple(np.reshape(cost_arrays, (len(cost_arrays), -1)).T), )\n ixs = np.unravel_index(flat_ixs, dims=cost_arrays[0].shape)\n return ixs\n"
]
| [
[
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.unravel_index",
"numpy.any",
"numpy.arange"
]
]
|
KangMingHsi/DeepRL_cs294-112 | [
"60f9fbb65eaaf444f67acb359aca04097864241f"
]
| [
"hw2/train_pg_f18.py"
]
| [
"\"\"\"\nOriginal code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017\nAdapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam\nAdapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport logz\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\n#========================================================================================#\n# ----------PROBLEM 2----------\n#========================================================================================# \ndef build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):\n \"\"\"\n Builds a feedforward neural network\n \n arguments:\n input_placeholder: placeholder variable for the state (batch_size, input_size)\n output_size: size of the output layer\n scope: variable scope of the network\n n_layers: number of hidden layers\n size: dimension of the hidden layer\n activation: activation of the hidden layers\n output_activation: activation of the ouput layers\n\n returns:\n output placeholder of the network (the result of a forward pass) \n\n Hint: use tf.layers.dense \n \"\"\"\n # YOUR CODE HERE\n with tf.variable_scope(scope):\n net = input_placeholder\n for layer in range(n_layers):\n net = tf.layers.dense(net, size, activation=activation, name=('layer_'+str(layer)))\n output_placeholder = tf.layers.dense(net, output_size, activation=output_activation, name='output')\n \n #raise NotImplementedError\n return output_placeholder\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef setup_logger(logdir, locals_):\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n # Log experimental parameters\n args = inspect.getargspec(train_PG)[0]\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n#============================================================================================#\n# Policy Gradient\n#============================================================================================#\n\nclass Agent(object):\n def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):\n super(Agent, self).__init__()\n self.ob_dim = computation_graph_args['ob_dim']\n self.ac_dim = computation_graph_args['ac_dim']\n self.discrete = computation_graph_args['discrete']\n self.size = computation_graph_args['size']\n self.n_layers = computation_graph_args['n_layers']\n self.learning_rate = computation_graph_args['learning_rate']\n\n self.animate = sample_trajectory_args['animate']\n self.max_path_length = sample_trajectory_args['max_path_length']\n self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']\n\n self.gamma = estimate_return_args['gamma']\n self.reward_to_go = estimate_return_args['reward_to_go']\n self.nn_baseline = estimate_return_args['nn_baseline']\n self.normalize_advantages = estimate_return_args['normalize_advantages']\n self.gae_gamma = estimate_return_args['gae_gamma']\n\n def init_tf_sess(self):\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) \n self.sess = tf.Session(config=tf_config)\n self.sess.__enter__() # equivalent to `with self.sess:`\n 
tf.global_variables_initializer().run() #pylint: disable=E1101\n\n #========================================================================================#\n # ----------PROBLEM 2----------\n #========================================================================================#\n def define_placeholders(self):\n \"\"\"\n Placeholders for batch batch observations / actions / advantages in policy gradient \n loss function.\n See Agent.build_computation_graph for notation\n\n returns:\n sy_ob_no: placeholder for observations\n sy_ac_na: placeholder for actions\n sy_adv_n: placeholder for advantages\n \"\"\"\n #raise NotImplementedError\n sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name=\"ob\", dtype=tf.float32)\n if self.discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name=\"ac\", dtype=tf.float32) \n # YOUR CODE HERE\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32) \n return sy_ob_no, sy_ac_na, sy_adv_n\n\n\n #========================================================================================#\n # ----------PROBLEM 2----------\n #========================================================================================#\n def policy_forward_pass(self, sy_ob_no):\n \"\"\" Constructs the symbolic operation for the policy network outputs,\n which are the parameters of the policy distribution p(a|s)\n\n arguments:\n sy_ob_no: (batch_size, self.ob_dim)\n\n returns:\n the parameters of the policy.\n\n if discrete, the parameters are the logits of a categorical distribution\n over the actions\n sy_logits_na: (batch_size, self.ac_dim)\n\n if continuous, the parameters are a tuple (mean, log_std) of a Gaussian\n distribution over actions. log_std should just be a trainable\n variable, not a network output.\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n Hint: use the 'build_mlp' function to output the logits (in the discrete case)\n and the mean (in the continuous case).\n Pass in self.n_layers for the 'n_layers' argument, and\n pass in self.size for the 'size' argument.\n \"\"\"\n\n if self.discrete:\n # YOUR_CODE_HERE\n sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'Discrete', self.n_layers, self.size)\n return sy_logits_na\n else:\n # YOUR_CODE_HERE\n sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'Continuous_mean', self.n_layers, self.size)\n sy_logstd = tf.get_variable(name='Continuous_logstd', shape=[self.ac_dim,], dtype=tf.float32)\n return (sy_mean, sy_logstd)\n\n #========================================================================================#\n # ----------PROBLEM 2----------\n #========================================================================================#\n def sample_action(self, policy_parameters):\n \"\"\" Constructs a symbolic operation for stochastically sampling from the policy\n distribution\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n returns:\n sy_sampled_ac: \n if discrete: (batch_size,)\n if continuous: (batch_size, self.ac_dim)\n\n Hint: for the continuous case, use the reparameterization trick:\n The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n \n mu + sigma * z, z ~ N(0, I)\n \n This reduces the problem to just sampling z. 
(Hint: use tf.random_normal!)\n \"\"\"\n \n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_CODE_HERE\n sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1])\n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_CODE_HERE\n sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(shape=tf.shape(sy_mean))\n return sy_sampled_ac\n\n #========================================================================================#\n # ----------PROBLEM 2----------\n #========================================================================================#\n def get_log_prob(self, policy_parameters, sy_ac_na):\n \"\"\" Constructs a symbolic operation for computing the log probability of a set of actions\n that were actually taken according to the policy\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n sy_ac_na: \n if discrete: (batch_size,)\n if continuous: (batch_size, self.ac_dim)\n\n returns:\n sy_logprob_n: (batch_size)\n\n Hint:\n For the discrete case, use the log probability under a categorical distribution.\n For the continuous case, use the log probability under a multivariate gaussian.\n \"\"\"\n \n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_CODE_HERE\n sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)\n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_CODE_HERE\n sy_logprob_n = -0.5 * tf.reduce_sum(tf.square((sy_ac_na - sy_mean) / (tf.exp(sy_logstd) + 1e-8)), axis=1)\n \n return sy_logprob_n\n\n def build_computation_graph(self):\n \"\"\"\n Notes on notation:\n \n Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n that are computed later in the function\n \n Prefixes and suffixes:\n ob - observation \n ac - action\n _no - this tensor should have shape (batch self.size /n/, observation dim)\n _na - this tensor should have shape (batch self.size /n/, action dim)\n _n - this tensor should have shape (batch self.size /n/)\n \n Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis\n is None\n\n ----------------------------------------------------------------------------------\n loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate\n to get the policy gradient.\n \"\"\"\n self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()\n\n # The policy takes in an observation and produces a distribution over the action space\n self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)\n\n # We can sample actions from this action distribution.\n # This will be called in Agent.sample_trajectory() where we generate a rollout.\n self.sy_sampled_ac = self.sample_action(self.policy_parameters)\n\n # We can also compute the logprob of the actions that were actually taken by the policy\n # This is used in the loss function.\n self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)\n\n #========================================================================================#\n # ----------PROBLEM 2----------\n # Loss Function and Training Operation\n #========================================================================================#\n self.loss = -tf.reduce_mean(tf.multiply(self.sy_logprob_n, self.sy_adv_n)) # YOUR CODE 
HERE\n self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n\n #========================================================================================#\n # ----------PROBLEM 6----------\n # Optional Baseline\n #\n # Define placeholders for targets, a loss function and an update op for fitting a \n # neural network baseline. These will be used to fit the neural network baseline. \n #========================================================================================#\n if self.nn_baseline:\n #raise NotImplementedError\n self.baseline_prediction = tf.squeeze(build_mlp(\n self.sy_ob_no, \n 1, \n \"nn_baseline\",\n n_layers=self.n_layers,\n size=self.size))\n # YOUR_CODE_HERE\n self.sy_target_n = tf.placeholder(shape=[None], name='sy_target_n', dtype=tf.float32)\n self.baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n)\n self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.baseline_loss)\n\n def sample_trajectories(self, itr, env):\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n paths = []\n while True:\n animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)\n path = self.sample_trajectory(env, animate_this_episode)\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n if timesteps_this_batch > self.min_timesteps_per_batch:\n break\n return paths, timesteps_this_batch\n\n def sample_trajectory(self, env, animate_this_episode):\n ob = env.reset()\n obs, acs, rewards = [], [], []\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n obs.append(ob)\n #====================================================================================#\n # ----------PROBLEM 3----------\n #====================================================================================#\n \n ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no:ob[None]}) # YOUR CODE HERE\n ac = ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n rewards.append(rew)\n steps += 1\n if done or steps > self.max_path_length:\n break\n path = {\"observation\" : np.array(obs, dtype=np.float32), \n \"reward\" : np.array(rewards, dtype=np.float32), \n \"action\" : np.array(acs, dtype=np.float32)}\n return path\n\n #====================================================================================#\n # ----------PROBLEM 3----------\n #====================================================================================#\n def sum_of_rewards(self, re_n):\n \"\"\"\n Monte Carlo estimation of the Q function.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from \n Agent.sample_trajectories\n let num_paths be the number of paths sampled from Agent.sample_trajectories\n\n arguments:\n re_n: length: num_paths. Each element in re_n is a numpy array \n containing the rewards for the particular path\n\n returns:\n q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values \n whose length is the sum of the lengths of the paths\n\n ----------------------------------------------------------------------------------\n \n Your code should construct numpy arrays for Q-values which will be used to compute\n advantages (which will in turn be fed to the placeholder you defined in \n Agent.define_placeholders). \n \n Recall that the expression for the policy gradient PG is\n \n PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]\n \n where \n \n tau=(s_0, a_0, ...) 
is a trajectory,\n Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),\n and b_t is a baseline which may depend on s_t. \n \n You will write code for two cases, controlled by the flag 'reward_to_go':\n \n Case 1: trajectory-based PG \n \n (reward_to_go = False)\n \n Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over \n entire trajectory (regardless of which time step the Q-value should be for). \n \n For this case, the policy gradient estimator is\n \n E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]\n \n where\n \n Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.\n \n Thus, you should compute\n \n Q_t = Ret(tau)\n \n Case 2: reward-to-go PG \n \n (reward_to_go = True)\n \n Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting\n from time step t. Thus, you should compute\n \n Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}\n \n \n Store the Q-values for all timesteps and all trajectories in a variable 'q_n',\n like the 'ob_no' and 'ac_na' above. \n \"\"\"\n # YOUR_CODE_HERE\n q_n = []\n \n if self.reward_to_go:\n #raise NotImplementedError\n for rewards in re_n:\n path_num = np.shape(rewards)[0]\n path = np.zeros((path_num,))\n \n path[-1] = rewards[-1]\n for index in reversed(range(path_num-1)):\n path[index] = self.gamma * path[index+1] + rewards[index]\n \n q_n.extend(path)\n else:\n #raise NotImplementedError\n for rewards in re_n:\n path_num = np.shape(rewards)[0]\n power = np.arange(path_num)\n powers = np.power(self.gamma, power)\n q_n.extend(np.ones(path_num) * np.sum(np.multiply(rewards, powers)))\n \n return q_n\n\n def compute_advantage(self, ob_no, q_n):\n \"\"\"\n Computes advantages by (possibly) subtracting a baseline from the estimated Q values\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from \n Agent.sample_trajectories\n let num_paths be the number of paths sampled from Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values \n whose length is the sum of the lengths of the paths\n\n returns:\n adv_n: shape: (sum_of_path_lengths). A single vector for the estimated \n advantages whose length is the sum of the lengths of the paths\n \"\"\"\n #====================================================================================#\n # ----------PROBLEM 6----------\n # Computing Baselines\n #====================================================================================#\n if self.nn_baseline:\n # If nn_baseline is True, use your neural network to predict reward-to-go\n # at each timestep for each trajectory, and save the result in a variable 'b_n'\n # like 'ob_no', 'ac_na', and 'q_n'.\n #\n # Hint #bl1: rescale the output from the nn_baseline to match the statistics\n # (mean and std) of the current batch of Q-values. (Goes with Hint\n # #bl2 in Agent.update_parameters.\n \n b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no:ob_no}) # YOUR CODE HERE\n b_n = np.mean(q_n) + np.std(q_n) * ((b_n - np.mean(b_n)) / (np.std(b_n) + 1e-8))\n adv_n = q_n - b_n\n else:\n adv_n = q_n.copy()\n return adv_n\n\n def estimate_return(self, ob_no, re_n):\n \"\"\"\n Estimates the returns over a set of trajectories.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from \n Agent.sample_trajectories\n let num_paths be the number of paths sampled from Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n re_n: length: num_paths. 
Each element in re_n is a numpy array \n containing the rewards for the particular path\n\n returns:\n q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values \n whose length is the sum of the lengths of the paths\n adv_n: shape: (sum_of_path_lengths). A single vector for the estimated \n advantages whose length is the sum of the lengths of the paths\n \"\"\"\n q_n = self.sum_of_rewards(re_n)\n #adv_n = self.compute_advantage(ob_no, q_n)\n \n # lambda-gae\n b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no:ob_no}) # YOUR CODE HERE\n b_n = np.mean(q_n) + np.std(q_n) * ((b_n - np.mean(b_n)) / (np.std(b_n) + 1e-8))\n \n adv_n = []\n idx = 0\n for rewards in re_n:\n adv = 0\n adv_path = []\n V_next = 0\n idx += len(rewards)\n\n # Dynamic programming over reversed path\n for rew, V in zip(reversed(rewards), b_n[idx-1:None:-1]):\n bellman_error = rew + self.gamma * V_next - V\n adv = bellman_error + self.gae_gamma * self.gamma * adv\n adv_path.append(adv)\n V_next = V\n adv_path.reverse()\n\n # Append these advantage values\n if not self.reward_to_go:\n adv_path = [adv_path[0]] * len(adv_path)\n adv_n.extend(adv_path)\n\n q_n = b_n + adv_n\n \n #====================================================================================#\n # ----------PROBLEM 3----------\n # Advantage Normalization\n #====================================================================================#\n if self.normalize_advantages:\n # On the next line, implement a trick which is known empirically to reduce variance\n # in policy gradient methods: normalize adv_n to have mean zero and std=1.\n #raise NotImplementedError\n adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8) # YOUR_CODE_HERE\n return q_n, adv_n\n\n def update_parameters(self, ob_no, ac_na, q_n, adv_n):\n \"\"\" \n Update the parameters of the policy and (possibly) the neural network baseline, \n which is trained to approximate the value function.\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n ac_na: shape: (sum_of_path_lengths).\n q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values \n whose length is the sum of the lengths of the paths\n adv_n: shape: (sum_of_path_lengths). A single vector for the estimated \n advantages whose length is the sum of the lengths of the paths\n\n returns:\n nothing\n\n \"\"\"\n #====================================================================================#\n # ----------PROBLEM 6----------\n # Optimizing Neural Network Baseline\n #====================================================================================#\n if self.nn_baseline:\n # If a neural network baseline is used, set up the targets and the inputs for the \n # baseline. \n # \n # Fit it to the current batch in order to use for the next iteration. Use the \n # baseline_update_op you defined earlier.\n #\n # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the \n # targets to have mean zero and std=1. 
(Goes with Hint #bl1 in \n # Agent.compute_advantage.)\n\n # YOUR_CODE_HERE\n target_n = ((q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)) \n feed = {self.sy_ob_no:ob_no, self.sy_target_n:target_n}\n _ = self.sess.run(self.baseline_update_op, feed)\n \n\n #====================================================================================#\n # ----------PROBLEM 3----------\n # Performing the Policy Update\n #====================================================================================#\n\n # Call the update operation necessary to perform the policy gradient update based on \n # the current batch of rollouts.\n # \n # For debug purposes, you may wish to save the value of the loss function before\n # and after an update, and then log them below. \n\n # YOUR_CODE_HERE\n #raise NotImplementedError\n feed = {self.sy_ob_no:ob_no, self.sy_ac_na:ac_na, self.sy_adv_n:adv_n}\n loss, _ = self.sess.run([self.loss, self.update_op], feed)\n \n logz.log_tabular(\"Loss\", loss)\n \ndef train_PG(\n exp_name,\n env_name,\n n_iter, \n gamma, \n min_timesteps_per_batch, \n max_path_length,\n learning_rate, \n reward_to_go, \n animate, \n logdir, \n normalize_advantages,\n nn_baseline, \n seed,\n n_layers,\n size,\n gae_gamma):\n\n start = time.time()\n\n #========================================================================================#\n # Set Up Logger\n #========================================================================================#\n setup_logger(logdir, locals())\n\n #========================================================================================#\n # Set Up Env\n #========================================================================================#\n\n # Make the gym environment\n env = gym.make(env_name)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env.seed(seed)\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n # Is this env continuous, or self.discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n #========================================================================================#\n # Initialize Agent\n #========================================================================================#\n computation_graph_args = {\n 'n_layers': n_layers,\n 'ob_dim': ob_dim,\n 'ac_dim': ac_dim,\n 'discrete': discrete,\n 'size': size,\n 'learning_rate': learning_rate,\n }\n\n sample_trajectory_args = {\n 'animate': animate,\n 'max_path_length': max_path_length,\n 'min_timesteps_per_batch': min_timesteps_per_batch,\n }\n\n estimate_return_args = {\n 'gamma': gamma,\n 'reward_to_go': reward_to_go,\n 'nn_baseline': nn_baseline,\n 'normalize_advantages': normalize_advantages,\n 'gae_gamma': gae_gamma,\n }\n\n agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)\n\n # build computation graph\n agent.build_computation_graph()\n\n # tensorflow: config, session, variable initialization\n agent.init_tf_sess()\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n paths, timesteps_this_batch = agent.sample_trajectories(itr, env)\n total_timesteps 
+= timesteps_this_batch\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n re_n = [path[\"reward\"] for path in paths]\n\n q_n, adv_n = agent.estimate_return(ob_no, re_n)\n agent.update_parameters(ob_no, ac_na, q_n, adv_n)\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vpg')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--reward_to_go', '-rtg', action='store_true')\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--nn_baseline', '-bl', action='store_true')\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=2)\n parser.add_argument('--size', '-s', type=int, default=64)\n parser.add_argument('--gae_gamma', type=float, default=1.0)\n args = parser.parse_args()\n\n if not(os.path.exists('data')):\n os.makedirs('data')\n logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join('data', logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n processes = []\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n\n def train_func():\n train_PG(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n min_timesteps_per_batch=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n reward_to_go=args.reward_to_go,\n animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n nn_baseline=args.nn_baseline, \n seed=seed,\n n_layers=args.n_layers,\n size=args.size,\n gae_gamma=args.gae_gamma\n )\n # # Awkward hacky process runs, because Tensorflow does not like\n # # repeatedly calling train_PG in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n processes.append(p)\n # if you comment in the line below, then the loop will block \n # until this process finishes\n # p.join()\n\n for p in processes:\n p.join()\n\nif 
__name__ == \"__main__\":\n main()\n"
]
| [
[
"tensorflow.exp",
"tensorflow.multinomial",
"numpy.min",
"numpy.mean",
"numpy.multiply",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"numpy.concatenate",
"numpy.max",
"tensorflow.shape",
"tensorflow.ConfigProto",
"tensorflow.variable_scope",
"numpy.arange",
"tensorflow.layers.dense",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"numpy.shape",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.get_variable",
"numpy.power",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.multiply",
"numpy.random.seed",
"numpy.ones"
]
]
|
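The policy-gradient entry above computes reward-to-go Q-values and GAE advantages with explicit reversed loops; note that, despite its name, the `gae_gamma` flag it threads through `estimate_return` plays the role of the GAE lambda. A minimal standalone sketch of the same two recurrences (the names `reward_to_go` and `gae_advantages` are illustrative, not from the stored file):

import numpy as np

def reward_to_go(rewards, gamma):
    # q[t] = r[t] + gamma * q[t+1], computed with a single reversed scan
    q = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        q[t] = running
    return q

def gae_advantages(rewards, values, gamma, lam):
    # delta[t] = r[t] + gamma * V[t+1] - V[t];  A[t] = delta[t] + gamma * lam * A[t+1]
    adv = np.zeros(len(rewards))
    running, v_next = 0.0, 0.0
    for t in reversed(range(len(rewards))):
        delta = rewards[t] + gamma * v_next - values[t]
        running = delta + gamma * lam * running
        adv[t] = running
        v_next = values[t]
    return adv

# e.g. reward_to_go(np.array([1., 1., 1.]), 0.9) -> [2.71, 1.9, 1.0]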
JanMatas/baselines | [
"410f51503c2ecb02ac14b723fa5548fd5a41730d"
]
| [
"baselines/asym_ddpg/main.py"
]
| [
"import argparse\nimport time\nfrom baselines import logger, bench\nfrom baselines.common.misc_util import (\n set_global_seeds,\n boolean_flag,\n)\nimport baselines.asym_ddpg.distributed_train as training\nfrom baselines.asym_ddpg.models import Actor, Critic\nfrom baselines.asym_ddpg.prioritized_memory import PrioritizedMemory\nfrom baselines.asym_ddpg.noise import *\nimport gym\nimport tensorflow as tf\nfrom mpi4py import MPI\nimport learning.demo_policies as demo\n\ndef run(env_id, eval_env_id, seed, noise_type, evaluation,demo_policy,use_velocities, num_dense_layers, dense_layer_size, layer_norm,demo_epsilon,replay_alpha,conv_size,cloth, **kwargs):\n # Configure things.\n if use_velocities:\n assert \"velos\" in env_id\n rank = MPI.COMM_WORLD.Get_rank()\n if rank != 0:\n logger.set_level(logger.DISABLED)\n\n # Create envs.\n env = gym.make(env_id)\n if isinstance(env.reset(),dict) :\n print (\"wrapping env\")\n env = gym.wrappers.FlattenDictWrapper(\n env, dict_keys=['observation', 'desired_goal'])\n # env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))\n\n if evaluation and rank==0:\n eval_env = gym.make(eval_env_id)\n if isinstance(eval_env.reset(),dict) :\n print (\"wrapping env\")\n eval_env = gym.wrappers.FlattenDictWrapper(\n eval_env, dict_keys=['observation', 'desired_goal'])\n # eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))\n else:\n eval_env = None\n\n # Parse noise_type\n action_noise = None\n nb_actions = env.action_space.shape[-1]\n for current_noise_type in noise_type.split(','):\n current_noise_type = current_noise_type.strip()\n if current_noise_type == 'none':\n pass\n elif 'normal' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n elif 'ou' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n else:\n raise RuntimeError('unknown noise type \"{}\"'.format(current_noise_type))\n\n # Configure components.\n\n #TODO:\n\n #memory = Memory(limit=int(1e4 * 5))\n memory = PrioritizedMemory(limit=int(1e4 * 5), alpha=replay_alpha, demo_epsilon=demo_epsilon)\n critic = Critic(num_dense_layers, dense_layer_size, layer_norm)\n actor = Actor(nb_actions, env.state_space.shape[0], num_dense_layers, dense_layer_size, layer_norm, conv_size=conv_size, cloth=cloth)\n\n # Seed everything to make things reproducible.\n seed = seed + 1000000 * rank\n logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))\n tf.reset_default_graph()\n set_global_seeds(seed)\n env.seed(seed)\n if eval_env is not None:\n eval_env.seed(seed)\n\n # Disable logging for rank != 0 to avoid noise.\n if rank == 0:\n start_time = time.time()\n\n demo_env = gym.make(env_id)\n demo_policy_object = None\n if demo.policies[demo_policy]:\n demo_policy_object = demo.policies[demo_policy]()\n if use_velocities:\n demo_policy_object = demo.VelocityWrapper(demo_policy_object,demo_env)\n eval_avg = training.train(env=env,env_id=env_id, eval_env=eval_env, action_noise=action_noise, actor=actor, critic=critic, memory=memory, demo_policy=demo_policy_object, demo_env=demo_env, cloth=cloth, **kwargs)\n env.close()\n if eval_env is not None:\n eval_env.close()\n if rank == 0:\n logger.info('total runtime: {}s'.format(time.time() - start_time))\n return eval_avg\n\n\ndef parse_args():\n parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--env-id', type=str, default='MicoEnv-pusher_fix-pixels-sparse-ik-acton-v1')\n parser.add_argument('--eval-env-id', type=str, default='')\n boolean_flag(parser, 'render-eval', default=True)\n boolean_flag(parser, 'render-demo', default=True)\n boolean_flag(parser, 'layer-norm', default=False)\n boolean_flag(parser, 'render', default=False)\n boolean_flag(parser, 'use-velocities', default=False)\n boolean_flag(parser, 'normalize-observations', default=True)\n boolean_flag(parser, 'normalize-state', default=True)\n boolean_flag(parser, 'normalize-aux', default=True)\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--critic-l2-reg', type=float, default=1e-2)\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--actor-lr', type=float, default=1e-4)\n parser.add_argument('--critic-lr', type=float, default=1e-3)\n boolean_flag(parser, 'popart', default=False)\n parser.add_argument('--gamma', type=float, default=0.999)\n parser.add_argument('--reward-scale', type=float, default=1.)\n parser.add_argument('--clip-norm', type=float, default=None)\n parser.add_argument('--nb-epochs', type=int, default=500)\n parser.add_argument('--nb-epoch-cycles', type=int, default=20)\n parser.add_argument('--nb-train-steps', type=int, default=50)\n parser.add_argument('--nb-eval-steps', type=int, default=2)\n parser.add_argument('--nb-rollout-steps', type=int, default=100)\n parser.add_argument('--noise-type', type=str, default='normal_0.2')\n parser.add_argument('--load-file', type=str, default='')\n parser.add_argument('--save-folder', type=str, default='')\n parser.add_argument('--conv-size', type=str, default='small') \n parser.add_argument('--num-timesteps', type=int, default=None)\n parser.add_argument('--num-demo-steps', type=int, default=20)\n parser.add_argument('--num-pretrain-steps', type=int, default=2000)\n parser.add_argument('--run-name', type=str, default='ignore')\n parser.add_argument('--demo-policy', type=str, default='pusher')\n parser.add_argument('--lambda-pretrain', type=float, default=5.0)\n parser.add_argument('--lambda-nstep', type=float, default=0.5)\n parser.add_argument('--lambda-1step', type=float, default=1.0)\n parser.add_argument('--replay-beta', type=float, default=0.4)\n parser.add_argument('--reset-to-demo-rate', type=float, default=0.9)\n parser.add_argument('--tau', type=float, default=0.01)\n parser.add_argument('--target-policy-noise', type=float, default=0.0)\n parser.add_argument('--target-policy-noise-clip', type=float, default=0.0)\n parser.add_argument('--policy-and-target-update-period', type=int, default=2)\n parser.add_argument('--dense-layer-size', type=int, default=256)\n parser.add_argument('--num-dense-layers', type=int, default=4)\n parser.add_argument('--num-critics', type=int, default=2)\n parser.add_argument('--nsteps', type=int, default=10)\n parser.add_argument('--demo-terminality', type=int, default=5)\n parser.add_argument('--replay-alpha', type=float, default=0.8)\n parser.add_argument('--demo-epsilon', type=float, default=0.2)\n parser.add_argument('--lambda-obj-conf-predict', type=float, default=500000.0)\n parser.add_argument('--lambda-target-predict', type=float, default=500000.0)\n parser.add_argument('--lambda-gripper-predict', type=float, default=500000.0)\n\n boolean_flag(parser, 'positive-reward', default=True)\n boolean_flag(parser, 'only-eval', default=False)\n 
boolean_flag(parser, 'cloth', default=True)\n\n    boolean_flag(parser, 'evaluation', default=True)\n\n    args = parser.parse_args()\n\n    # we don't directly specify timesteps for this script, so make sure that if we do specify them\n    # they agree with the other parameters\n    if args.num_timesteps is not None:\n        assert args.num_timesteps == args.nb_epochs * args.nb_epoch_cycles * args.nb_rollout_steps\n    dict_args = vars(args)\n    del dict_args['num_timesteps']\n    return dict_args\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    if not args[\"eval_env_id\"]:\n        args[\"eval_env_id\"] = args[\"env_id\"]\n    if MPI.COMM_WORLD.Get_rank() == 0:\n        logger.configure()\n    # Run actual script.\n    run(**args)\n"
]
| [
[
"tensorflow.reset_default_graph"
]
]
|
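The DDPG launcher above selects exploration noise by parsing strings such as 'normal_0.2' or 'ou_0.3' and instantiating the matching class from `baselines.asym_ddpg.noise`. A rough sketch of what the Ornstein-Uhlenbeck variant of such an action-noise process typically looks like (the constructor defaults here are assumptions, not the repo's actual values):

import numpy as np

class OrnsteinUhlenbeckActionNoise:
    # x_{t+1} = x_t + theta * (mu - x_t) * dt + sigma * sqrt(dt) * N(0, I)
    def __init__(self, mu, sigma, theta=0.15, dt=1e-2):
        self.mu, self.sigma, self.theta, self.dt = mu, sigma, theta, dt
        self.reset()

    def __call__(self):
        self.x = (self.x + self.theta * (self.mu - self.x) * self.dt
                  + self.sigma * np.sqrt(self.dt) * np.random.standard_normal(self.mu.shape))
        return self.x

    def reset(self):
        # restart the process at its long-run mean
        self.x = np.copy(self.mu)

noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(4), sigma=0.2 * np.ones(4))
sample = noise()  # temporally correlated noise, one value per action dimension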
charlesccychen/tfx0 | [
"6d3e777e15fd7ffe14cc803069db62dfd1fe0008"
]
| [
"tfx/components/example_gen/utils_test.py"
]
| [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.example_gen.utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Standard Imports\n\nimport tensorflow as tf\nfrom tfx.components.example_gen import utils\nfrom tfx.proto import example_gen_pb2\n\n\nclass UtilsTest(tf.test.TestCase):\n\n def test_dict_to_example(self):\n instance_dict = {\n 'int': 10,\n 'float': 5.0,\n 'str': 'abc',\n 'int_list': [1, 2],\n 'float_list': [3.0],\n 'str_list': ['ab', 'cd'],\n 'none': None,\n 'empty_list': [],\n }\n example = utils.dict_to_example(instance_dict)\n self.assertProtoEquals(\n \"\"\"\n features {\n feature {\n key: \"empty_list\"\n value {\n }\n }\n feature {\n key: \"float\"\n value {\n float_list {\n value: 5.0\n }\n }\n }\n feature {\n key: \"float_list\"\n value {\n float_list {\n value: 3.0\n }\n }\n }\n feature {\n key: \"int\"\n value {\n int64_list {\n value: 10\n }\n }\n }\n feature {\n key: \"int_list\"\n value {\n int64_list {\n value: 1\n value: 2\n }\n }\n }\n feature {\n key: \"none\"\n value {\n }\n }\n feature {\n key: \"str\"\n value {\n bytes_list {\n value: \"abc\"\n }\n }\n }\n feature {\n key: \"str_list\"\n value {\n bytes_list {\n value: \"ab\"\n value: \"cd\"\n }\n }\n }\n }\n \"\"\", example)\n\n def test_make_output_split_names(self):\n split_names = utils.generate_output_split_names(\n input_config=example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(name='train', pattern='train/*'),\n example_gen_pb2.Input.Split(name='eval', pattern='eval/*')\n ]),\n output_config=example_gen_pb2.Output())\n self.assertListEqual(['train', 'eval'], split_names)\n\n split_names = utils.generate_output_split_names(\n input_config=example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(name='single', pattern='single/*')\n ]),\n output_config=example_gen_pb2.Output(\n split_config=example_gen_pb2.SplitConfig(splits=[\n example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=2),\n example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)\n ])))\n self.assertListEqual(['train', 'eval'], split_names)\n\n def test_make_default_output_config(self):\n output_config = utils.make_default_output_config(\n utils.make_default_input_config())\n self.assertEqual(2, len(output_config.split_config.splits))\n\n output_config = utils.make_default_output_config(\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(name='train', pattern='train/*'),\n example_gen_pb2.Input.Split(name='eval', pattern='eval/*')\n ]))\n self.assertEqual(0, len(output_config.split_config.splits))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"tensorflow.test.main"
]
]
|
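The test above pins down the contract of `utils.dict_to_example`: ints map to int64_list, floats to float_list, strings to bytes_list, and None or an empty list to an empty feature. A minimal converter satisfying that contract might look like the following sketch (the real implementation lives in `tfx.components.example_gen.utils`; this is only an assumed reconstruction):

import tensorflow as tf

def dict_to_example(instance):
    # Convert a flat dict of python values into a tf.train.Example.
    feature = {}
    for key, value in instance.items():
        if value is None:
            feature[key] = tf.train.Feature()
            continue
        if not isinstance(value, list):
            value = [value]
        if not value:  # empty list -> empty feature
            feature[key] = tf.train.Feature()
        elif isinstance(value[0], int):
            feature[key] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
        elif isinstance(value[0], float):
            feature[key] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
        else:
            feature[key] = tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[v.encode('utf-8') for v in value]))
    return tf.train.Example(features=tf.train.Features(feature=feature))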
Mailaender/spectrochempy | [
"d58221afeb9f78e2e3e0079b3fd6c0162a902c04"
]
| [
"spectrochempy/application.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France.\n# =\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory\n# =\n# ======================================================================================================================\n\"\"\"\nThis module define the `application` on which the API rely.\n\nIt also define\nthe default application preferences and IPython magic functions.\n\"\"\"\n\n__all__ = []\n\nimport re\nimport sys\nimport logging\nimport subprocess\nimport datetime\nimport warnings\nimport pprint\nimport json\nfrom os import environ\nfrom pathlib import Path\nimport threading\n\nfrom pkg_resources import parse_version, get_distribution, DistributionNotFound\nimport requests\nfrom setuptools_scm import get_version\nfrom traitlets.config.configurable import Config\nfrom traitlets.config.application import Application\nfrom traitlets import (\n Bool,\n Unicode,\n List,\n Integer,\n Enum,\n Union,\n HasTraits,\n Instance,\n default,\n observe,\n)\nfrom traitlets.config.manager import BaseJSONConfigManager\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom IPython import get_ipython\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.core.magic import Magics, magics_class, line_cell_magic\nfrom IPython.core.magics.code import extract_symbols\nfrom IPython.core.error import UsageError\nfrom IPython.utils.text import get_text_list\nfrom IPython.display import publish_display_data, clear_output\nfrom jinja2 import Template\n\nfrom spectrochempy.utils import MetaConfigurable, pathclean, get_pkg_path\nfrom .plot_preferences import PlotPreferences\n\n# set the default style\nplt.style.use([\"classic\"])\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Log levels\n# ----------------------------------------------------------------------------------------------------------------------\n\nDEBUG = logging.DEBUG\nINFO = logging.INFO\nWARNING = logging.WARNING\nERROR = logging.ERROR\nCRITICAL = logging.CRITICAL\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# logo / copyright display\n# ----------------------------------------------------------------------------------------------------------------------\n\n\ndef display_info_string(**kwargs): # pragma: no cover\n _template = \"\"\"\n {{widgetcss}}\n <table><tr><td>\n {% if logo %}\n <img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAAlw\n SFlzAAAJOgAACToB8GSSSgAAAetpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6\n bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8x\n OTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAg\n eG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMu\n YWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+bWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\n dHRwOi8vbWF0cGxvdGxpYi5vcmcvPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6\n T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgqNQaNYAAAGiUlE\n 
QVRIDY1We4xU1Rn/3XPuYx47u8w+hnU38hTcuoUEt/6D2y4RB0ME1BoEd9taJaKh9CFiN7YGp7appUAMNmktMZFoJTYVLVQ0smsy\n 26CN0SU1QgsuFAaW3WVmx33N677O6XfuyoIxTXqSO/fec+75fd93vt/3/UbDV0aKSZmCpkFMLz3T9utuu2N+o98aDSMBKVAo89z5\n y+zEz3ZafcCOfvWdlGCalqKn1Bf71CygTd+mf1esSOnpdMpTb+vWpTZuWVfe3jLPa5tzHYNm0T5N0gpdkkHaDBeGBU6d1/t/fyS8\n +/CbqdfUvmsx1PuMgc2bNxv79u1zgd31r+7JH1jbIZKxWRXAcYUQ8IWvBfBXNjEuJWPgMA02NR7C3/pYT9fjdZ3A9tGrWF8YSJHn\n qcDz3y7q2T967PZv+gnYJdd1mEZ+62zGDQV/dQgKhmLzDNOXCEWM3j6eTT5Y3w78dOBKJLR1PQf+4ivPj76UPZnssBN+wbM9Aet/\n AV81Mf1EEULXYfOobvX2WWQk0aoioXwwSmirOlioY0mu8BIouzYl7P8GV3vpqCCEZvlFz769w08oLDWvyKIyL1asSm28d6WfzA97\n ztvvV1kexUMsmhlkULEkuGYmFYC6AvfUrITnwUKl5K79lkjeSSRRTCTbQPd95e1WzMbZSya74XoXAxctCllCnbECMOjZNGRwvzIX\n nD85wbkMmKK+U045Dtdi8Qp+SAxU2GTg2bYlC9224pgvmSb54vkVTBQYyhUt2KjAMyMmPjwRQW5Mh2WKwJhlBh6jVGagFM84wZnQ\n 4bpC0Rt4pk1PbSt0NDcxDA5xryosDHWgtbM0DGZDWLSoiDMDYeQnGVrmOThxLozB0RAaahzkJzjKNqcIQBymJFMkOlN8Dqjpg0XY\n Tx5xO/QbmmUrqIjGJznq47TqTaClKYfjp+PInLMwnOdYvtQBZ2XcunQY+VwIo4U4muoFEjVEFE6lQyEUKzHYfgQG9ylCyngU+Cxj\n tOqxCDGHcCsOMCs6iQul5ZiStdATYxjMZXDLTUVwLY8Jey4uOh2IxjwsrP8UXJYxUrkZrghBahzV5iXU6gNkq0Z1EzIsUBUSCV2n\n EOHo0LVxHCpuxabJJdhi5PFnvw5vLXwXIfNZvD/+JNo/X40NegE54sUaazl+UL8XD1x+FB9Ijjt4EQfdGN6J/x131LwIV9ap/AYs\n 0x1fz1ZKFbh6A7qKy/By9Dg6G36Ep91vUJJ15Cqr0Z67E8/HzmBrw1OwxWyM+3Mo6BAuSB17oyfx0Oyl2DN0Hqs/70Cx6hBCvESF\n UY1ShWXZZEE7OTAYxZzaPH4TuoiusZvRnunFy2NbiHYuBp2vB66srX4vMEjpRKPxKXmnoQ4+Mn4DPiv8CYcrs3GfNUXJLtM+alSO\n hrMj/KT+wBNW3+E/2liywNO3iSflbaFva/+stGDTxE0E9Sjaox8HBhxpEamzMGSEaFKg+mjEddzDh1MxTDq3YV1kGBsjfwW3S9Cq\n anjmko+ndlb1UR3s6K8JlfphNWq9Ew/7c61T2BB/EbcaNkb8GBaE0tANH7/M34PLdhJDzjIcL9xPbdTG6zyM72Y+wXPHmvB489No\n fm0b5HnbQ9Rgp/7DSSd29AeVvPeNyK6JcYl/yQVi5dBjuGvoV/gaJe47s45QUxrDmcYX0MBsdF7egvXZ7+O0vZA4X8QmOQWjlSK7\n RDz5wIM30gp9UbWcGjXxhzdDu1SiNSpx6kcQB57rPnr/3dlkZarWLnlRq5oPET1dOCIOk4wALib9eeS5iygfhkd09H0DWphB/+gs\n +PcOAS+ssrFmmXXgVfR0de9cpbAJfH3Q1jofW9DZk56dDcVsq9YcsoUMEd1qyLoT3BX1YiyHMJuk97hyjqIoE91t+NcTLeN0ZrfM\n oXatZbu6G0h4VG+ibqq0IJVK6cAjo6serG3vSUezCMct0yQeSOFJSUImqb2qbknUpDqlZxE0QZ+ZUpSlZx79h4Nda6zef9dlk121\n JDjbR5XggPRZlRnS6bRQRtLpn4++cuie/Yvn2svmNxuLw9WCcYIl4fEoTEGiSTUqJdfgU+8ROqf1iMkLzS389YtNPXc/PH8l8ONB\n JZkHD+4JtD04HmVEDWWErmBhzV2/2LB1bemJG6krzv2S6NOHUgtEP0Oif5pE/3fHoruP7N8RiP61GArzSwbUhJJQpXJKiKbfr/3b\n IhKq76sKPUdF9NW/LSqfSn6vjv8C45H/6FSgvZQAAAAASUVORK5CYII='\n style='height:25px; border-radius:12px; display:inline-block; float:left; vertical-align:middle'></img>\n {% endif %}\n </td><td>\n {% if message %}\n <span style='font-size:12px'>{{ message }}</span>\n {% endif %}\n </td></tr></table>\n </div>\n \"\"\"\n\n clear_output()\n\n logo = kwargs.get(\"logo\", True)\n message = kwargs.get(\"message\", \"info \")\n\n template = Template(_template)\n html = template.render(\n {\"logo\": logo, \"message\": message.strip().replace(\"\\n\", \"<br/>\")}\n )\n publish_display_data(data={\"text/html\": html})\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Version\n# ----------------------------------------------------------------------------------------------------------------------\ntry:\n __release__ = get_distribution(\"spectrochempy\").version.split(\"+\")[0]\n \"Release version string of this package\"\nexcept DistributionNotFound: # pragma: no cover\n # package is not installed\n __release__ = \"--not set--\"\n\ntry:\n __version__ = get_version(root=\"..\", relative_to=__file__)\n \"Version string of this package\"\nexcept LookupError: # pragma: no cover\n __version__ = 
__release__\n\n\n# ............................................................................\ndef _get_copyright():\n current_year = datetime.date.today().year\n right = \"2014-{}\".format(current_year)\n right += \" - A.Travert & C.Fernandez @ LCS\"\n return right\n\n\n__copyright__ = _get_copyright()\n\"Copyright string of this package\"\n\n\n# .............................................................................\ndef _get_release_date():\n return subprocess.getoutput(\"git log -1 --tags --date=short --format='%ad'\")\n\n\n__release_date__ = _get_release_date()\n\"Last release date of this package\"\n\n\ndef _check_for_updates(*args, **kwargs):\n # Get version\n conda_url = \"https://anaconda.org/spectrocat/spectrochempy/files\"\n try:\n response = requests.get(conda_url)\n except requests.exceptions.RequestException: # pragma: no cover\n return None\n\n regex = (\n r\"\\/\\d{1,2}\\.\\d{1,2}\\.\\d{1,2}\\/download\\/noarch\"\n r\"\\/spectrochempy-(\\d{1,2}\\.\\d{1,2}\\.\\d{1,2})\\-(dev\\d{1,2}|stable).tar.bz2\"\n )\n matches = re.finditer(regex, response.text, re.MULTILINE)\n vavailables = []\n for matchNum, match in enumerate(matches):\n v = match[1]\n if match[2] == \"stable\":\n vavailables.append(v)\n\n old = parse_version(__version__)\n\n new_version = None\n for key in vavailables:\n new = parse_version(key)\n if new > old: # pragma: no cover\n new_version = key\n\n fi = Path.home() / \".scpy_update\"\n if new_version: # pragma: no cover\n fi.write_text(\n f\"\\n\\n\\tYou are running SpectrocChemPy-{__version__} but version {new_version} is available.\"\n f\"\\n\\tPlease consider updating for bug fixes and new features! \"\n )\n\n else: # pragma: no cover\n if fi.exists():\n fi.unlink()\n\n\nCHECK_UPDATE = threading.Thread(target=_check_for_updates, args=(1,))\nCHECK_UPDATE.start()\n\n# other info\n# ............................................................................\n\n__url__ = \"https://www.spectrochempy.fr\"\n\"URL for the documentation of this package\"\n\n__author__ = \"C. Fernandez & A. Travert\"\n\"First authors(s) of this package\"\n\n__contributor__ = \"A. Ait Blal, W. Guérin\"\n\"contributor(s) to this package\"\n\n__license__ = \"CeCILL-B license\"\n\"Licence of this package\"\n\n__cite__ = (\n f\"Arnaud Travert & Christian Fernandez (2021) SpectroChemPy (version\"\n f\" {'.'.join(__version__.split('.')[0:2])}). \"\n f\"Zenodo. 
https://doi.org/10.5281/zenodo.3823841\"\n)\n\"How to cite this package\"\n\n\n# ..................................................................................................................\ndef _find_or_create_spectrochempy_dir():\n directory = Path.home() / \".spectrochempy\"\n\n directory.mkdir(exist_ok=True) # Create directory only if it do not exist\n\n if directory.is_file(): # pragma: no cover\n msg = \"Intended SpectroChemPy directory `{0}` is \" \"actually a file.\"\n raise IOError(msg.format(directory))\n\n return directory\n\n\n# ======================================================================================================================\n# Magic ipython function\n# ======================================================================================================================\n@magics_class\nclass SpectroChemPyMagics(Magics):\n \"\"\"\n This class implements the addscript ipython magic function.\n \"\"\"\n\n @line_cell_magic\n def addscript(self, pars=\"\", cell=None):\n \"\"\"\n This works both as **%addscript** and as **%%addscript**\n\n This magic command can either take a local filename, element in the\n namespace or history range (see %history),\n or the current cell content\n\n\n Usage:\n\n %addscript -p project n1-n2 n3-n4 ... n5 .. n6 ...\n\n or\n\n %%addscript -p project\n ...code lines ...\n\n\n Options:\n\n -p <string> Name of the project where the script will be stored.\n If not provided, a project with a standard\n name : `proj` is searched.\n -o <string> script name\n -s <symbols> Specify function or classes to load from python\n source.\n -a append to the current script instead of\n overwriting it.\n -n search symbol in the current namespace\n\n\n Examples\n --------\n\n .. sourcecode:: ipython\n\n In[1]: %addscript myscript.py\n\n In[2]: %addscript 7-27\n\n In[3]: %addscript -s MyClass,myfunction myscript.py\n In[4]: %addscript MyClass\n\n In[5]: %addscript mymodule.myfunction\n \"\"\"\n opts, args = self.parse_options(pars, \"p:o:s:n:a\")\n\n # append = 'a' in opts\n # mode = 'a' if append else 'w'\n search_ns = \"n\" in opts\n\n if not args and not cell and not search_ns: # pragma: no cover\n raise UsageError(\n \"Missing filename, input history range, \"\n \"or element in the user namespace.\\n \"\n \"If no argument are given then the cell content \"\n \"should \"\n \"not be empty\"\n )\n name = \"script\"\n if \"o\" in opts:\n name = opts[\"o\"]\n\n proj = \"proj\"\n if \"p\" in opts:\n proj = opts[\"p\"]\n if proj not in self.shell.user_ns: # pragma: no cover\n raise ValueError(\n \"Cannot find any project with name `{}` in the \"\n \"namespace.\".format(proj)\n )\n # get the proj object\n projobj = self.shell.user_ns[proj]\n\n contents = \"\"\n if search_ns:\n contents += (\n \"\\n\" + self.shell.find_user_code(opts[\"n\"], search_ns=search_ns) + \"\\n\"\n )\n\n args = \" \".join(args)\n if args.strip():\n contents += (\n \"\\n\" + self.shell.find_user_code(args, search_ns=search_ns) + \"\\n\"\n )\n\n if \"s\" in opts: # pragma: no cover\n try:\n blocks, not_found = extract_symbols(contents, opts[\"s\"])\n except SyntaxError:\n # non python code\n logging.error(\"Unable to parse the input as valid Python code\")\n return\n\n if len(not_found) == 1:\n warnings.warn(\"The symbol `%s` was not found\" % not_found[0])\n elif len(not_found) > 1:\n warnings.warn(\n \"The symbols %s were not found\"\n % get_text_list(not_found, wrap_item_with=\"`\")\n )\n\n contents = \"\\n\".join(blocks)\n\n if cell:\n contents += \"\\n\" + cell\n\n # import 
delayed to avoid circular import error\n from spectrochempy.core.scripts.script import Script\n\n script = Script(name, content=contents)\n projobj[name] = script\n\n return \"Script {} created.\".format(name)\n\n # @line_magic # def runscript(self, pars=''): # \"\"\" # # # \"\"\" # opts,\n # args = self.parse_options(pars, '') # # if # not args: # raise UsageError('Missing script\n # name') # # # return args\n\n\n# ======================================================================================================================\n# DataDir class\n# ======================================================================================================================\n\n\nclass DataDir(HasTraits):\n \"\"\"A class used to determine the path to the testdata directory.\"\"\"\n\n path = Instance(Path)\n\n @default(\"path\")\n def _get_path_default(self, **kwargs): # pragma: no cover\n\n super().__init__(**kwargs)\n\n # create a directory testdata in .spectrochempy to avoid an error if the following do not work\n path = _find_or_create_spectrochempy_dir() / \"testdata\"\n path.mkdir(exist_ok=True)\n\n # try to use the conda installed testdata (spectrochempy_data package)\n try:\n conda_env = environ[\"CONDA_PREFIX\"]\n path = Path(conda_env) / \"share\" / \"spectrochempy_data\" / \"testdata\"\n if not path.exists():\n path = (\n Path(conda_env) / \"share\" / \"spectrochempy_data\"\n ) # depending on the version of spectrochempy_data\n except KeyError:\n pass\n\n return path\n\n def listing(self):\n \"\"\"\n Create a str representing a listing of the testdata folder.\n\n Returns\n -------\n listing : str\n Display of the datadir content\n \"\"\"\n strg = f\"{self.path.name}\\n\" # os.path.basename(self.path) + \"\\n\"\n\n def _listdir(s, initial, ns):\n ns += 1\n for f in pathclean(initial).glob(\n \"*\"\n ): # glob.glob(os.path.join(initial, '*')):\n fb = f.name # os.path.basename(f)\n if fb.startswith(\".\"): # pragma: no cover\n continue\n if (\n not fb.startswith(\"acqu\")\n and not fb.startswith(\"pulse\")\n and fb not in [\"ser\", \"fid\"]\n ):\n s += \" \" * ns + \"|__\" + \"%s\\n\" % fb\n if f.is_dir():\n s = _listdir(s, f, ns)\n return s\n\n return _listdir(strg, self.path, -1)\n\n @classmethod\n def class_print_help(cls):\n # to work with --help-all\n \"\"\"\"\"\" # TODO: make some useful help\n\n def __str__(self):\n return self.listing()\n\n def _repr_html_(self): # pragma: no cover\n # _repr_html is needed to output in notebooks\n return self.listing().replace(\"\\n\", \"<br/>\").replace(\" \", \" \")\n\n\n# ======================================================================================================================\n# General Preferences\n# ======================================================================================================================\n\n\nclass GeneralPreferences(MetaConfigurable):\n \"\"\"\n Preferences that apply to the |scpy| application in general\n\n They should be accessible from the main API\n \"\"\"\n\n name = Unicode(\"GeneralPreferences\")\n description = Unicode(\"General options for the SpectroChemPy application\")\n updated = Bool(False)\n\n # ------------------------------------------------------------------------------------------------------------------\n # Configuration entries\n # ------------------------------------------------------------------------------------------------------------------\n\n # NON GUI\n show_info_on_loading = Bool(True, help=\"Display info on loading\").tag(config=True)\n use_qt = Bool(\n False,\n 
help=\"Use QT for dialog instead of TK which is the default. \"\n \"If True the PyQt libraries must be installed\",\n ).tag(config=True)\n\n # GUI\n databases_directory = Union(\n (Instance(Path), Unicode()),\n help=\"Directory where to look for database files such as csv\",\n ).tag(config=True, gui=True, kind=\"folder\")\n\n datadir = Union(\n (Instance(Path), Unicode()), help=\"Directory where to look for data by default\"\n ).tag(config=True, gui=True, kind=\"folder\")\n\n workspace = Union(\n (Instance(Path), Unicode()), help=\"Workspace directory by default\"\n ).tag(config=True, gui=True, kind=\"folder\")\n\n # ------------------------------------------------------------------------------------------------------------------\n # Configuration entries\n # ------------------------------------------------------------------------------------------------------------------\n\n autoload_project = Bool(\n True, help=\"Automatic loading of the last project at startup\"\n ).tag(config=True, gui=True)\n\n autosave_project = Bool(True, help=\"Automatic saving of the current project\").tag(\n config=True, gui=True\n )\n\n project_directory = Union(\n (Instance(Path), Unicode()),\n help=\"Directory where projects are stored by default\",\n ).tag(config=True, kind=\"folder\")\n\n last_project = Union(\n (Instance(Path, allow_none=True), Unicode()), help=\"Last used project\"\n ).tag(config=True, gui=True, kind=\"file\")\n\n show_close_dialog = Bool(\n True,\n help=\"Display the close project dialog project changing or on application exit\",\n ).tag(config=True, gui=True)\n\n csv_delimiter = Enum(\n [\",\", \";\", r\"\\t\", \" \"], default_value=\",\", help=\"CSV data delimiter\"\n ).tag(config=True, gui=True)\n\n @default(\"project_directory\")\n def _get_default_project_directory(self):\n # Determines the SpectroChemPy project directory name and creates the directory if it doesn't exist.\n # This directory is typically ``$HOME/spectrochempy/projects``, but if the SCP_PROJECTS_HOME environment\n # variable is set and the `$SCP_PROJECTS_HOME` directory exists, it will be that directory.\n # If neither exists, the former will be created.\n\n # first look for SCP_PROJECTS_HOME\n pscp = environ.get(\"SCP_PROJECTS_HOME\")\n if pscp is not None and Path(pscp).exists():\n return Path(pscp)\n\n pscp = Path.home() / \".spectrochempy\" / \"projects\"\n\n pscp.mkdir(exist_ok=True)\n\n if pscp.is_file():\n raise IOError(\"Intended Projects directory is actually a file.\")\n\n return pscp\n\n # ..................................................................................................................\n @default(\"workspace\")\n def _get_workspace_default(self):\n # the spectra path in package data\n return Path.home()\n\n # ..................................................................................................................\n @default(\"databases_directory\")\n def _get_databases_directory_default(self):\n # the spectra path in package data\n return Path(get_pkg_path(\"databases\", \"scp_data\"))\n\n # ..................................................................................................................\n @default(\"datadir\")\n def _get_default_datadir(self):\n return self.parent.datadir.path\n\n # ..................................................................................................................\n @observe(\"datadir\")\n def _datadir_changed(self, change):\n self.parent.datadir.path = pathclean(change[\"new\"])\n\n # 
..................................................................................................................\n @property\n def log_level(self):\n \"\"\"\n int - logging level\n \"\"\"\n return self.parent.log_level\n\n # ..................................................................................................................\n @log_level.setter\n def log_level(self, value):\n if isinstance(value, str):\n value = getattr(logging, value, None)\n if value is None: # pragma: no cover\n warnings.warn(\n \"Log level not changed: invalid value given\\n\"\n \"string values must be DEBUG, INFO, WARNING, \"\n \"or ERROR\"\n )\n self.parent.log_level = value\n\n # ..................................................................................................................\n def __init__(self, **kwargs):\n super().__init__(jsonfile=\"GeneralPreferences\", **kwargs)\n\n\n# ======================================================================================================================\n# Application\n# ======================================================================================================================\n\n\nclass SpectroChemPy(Application):\n \"\"\"\n This class SpectroChemPy is the main class, containing most of the setup,\n configuration and more.\n \"\"\"\n\n icon = Unicode(\"scpy.png\")\n \"Icon for the application\"\n\n running = Bool(False)\n \"Running status of the |scpy| application\"\n\n name = Unicode(\"SpectroChemPy\")\n \"Running name of the application\"\n\n description = Unicode(\n \"SpectroChemPy is a framework for processing, analysing and modelling Spectroscopic data for \"\n \"Chemistry with Python.\"\n )\n \"Short description of the |scpy| application\"\n\n long_description = Unicode()\n \"Long description of the |scpy| application\"\n\n @default(\"long_description\")\n def _get_long_description(self):\n desc = \"\"\"\n<p><strong>SpectroChemPy</strong> is a framework for processing, analysing and modelling\n <strong>Spectro</>scopic data for <strong>Chem</strong>istry with <strong>Py</strong>thon.\n It is a cross platform software, running on Linux, Windows or OS X.</p><br><br>\n<strong>Version:</strong> {version}<br>\n<strong>Authors:</strong> {authors}<br>\n<strong>License:</strong> {license}<br>\n<div class='warning'> SpectroChemPy is still experimental and under active development. Its current design and\n functionalities are subject to major changes, reorganizations, bugs and crashes!!!. 
Please report any issues\nto the <a url='https://github.com/spectrochempy/spectrochempy/issues'>Issue Tracker<a>\n</div><br><br>\nWhen using <strong>SpectroChemPy</strong> for your own work, you are kindly requested to cite it this way:\n<pre>{cite}\n</pre></p>\"\"\".format(\n version=__release__, authors=__author__, license=__license__, cite=__cite__\n )\n\n return desc\n\n # ------------------------------------------------------------------------------------------------------------------\n # Configuration parameters\n # ------------------------------------------------------------------------------------------------------------------\n\n # Config file setting\n # ------------------------------------------------------------------------------------------------------------------\n _loaded_config_files = List()\n\n reset_config = Bool(False, help=\"Should we restore a default configuration ?\").tag(\n config=True\n )\n \"\"\"Flag: True if one wants to reset settings to the original config defaults\"\"\"\n\n config_file_name = Unicode(None, help=\"Configuration file name\").tag(config=True)\n \"\"\"Configuration file name\"\"\"\n\n @default(\"config_file_name\")\n def _get_config_file_name_default(self):\n return str(self.name).lower() + \"_cfg\"\n\n config_dir = Instance(Path, help=\"Set the configuration directory location\").tag(\n config=True\n )\n \"\"\"Configuration directory\"\"\"\n\n @default(\"config_dir\")\n def _get_config_dir_default(self):\n return self.get_config_dir()\n\n config_manager = Instance(BaseJSONConfigManager)\n\n @default(\"config_manager\")\n def _get_default_config_manager(self):\n return BaseJSONConfigManager(config_dir=str(self.config_dir))\n\n log_format = Unicode(\n \"%(highlevel)s %(message)s\",\n help=\"The Logging format template\",\n ).tag(config=True)\n\n debug = Bool(True, help=\"Set DEBUG mode, with full outputs\").tag(config=True)\n \"\"\"Flag to set debugging mode\"\"\"\n\n info = Bool(False, help=\"Set INFO mode, with msg outputs\").tag(config=True)\n \"\"\"Flag to set info mode\"\"\"\n\n quiet = Bool(False, help=\"Set Quiet mode, with minimal outputs\").tag(config=True)\n \"\"\"Flag to set in fully quite mode (even no warnings)\"\"\"\n\n nodisplay = Bool(False, help=\"Set NO DISPLAY mode, i.e., no graphics outputs\").tag(\n config=True\n )\n \"\"\"Flag to set in NO DISPLAY mode \"\"\"\n\n # last_project = Unicode('', help='Last used project').tag(config=True, type='project')\n # \"\"\"Last used project\"\"\"\n #\n # @observe('last_project')\n # def _last_project_changed(self, change):\n # if change.name in self.traits(config=True):\n # self.config_manager.update(self.config_file_name, {self.__class__.__name__: {change.name: change.new, }})\n\n show_config = Bool(help=\"Dump configuration to stdout at startup\").tag(config=True)\n\n @observe(\"show_config\")\n def _show_config_changed(self, change):\n if change.new:\n self._save_start = self.start\n self.start = self.start_show_config\n\n show_config_json = Bool(help=\"Dump configuration to stdout (as JSON)\").tag(\n config=True\n )\n\n @observe(\"show_config_json\")\n def _show_config_json_changed(self, change):\n self.show_config = change.new\n\n test = Bool(False, help=\"test flag\").tag(config=True)\n \"\"\"Flag to set the application in testing mode\"\"\"\n\n port = Integer(7000, help=\"Dash server port\").tag(config=True)\n \"\"\"Dash server port\"\"\"\n\n # Command line interface\n # 
------------------------------------------------------------------------------------------------------------------\n\n aliases = dict(\n test=\"SpectroChemPy.test\",\n project=\"SpectroChemPy.last_project\",\n f=\"SpectroChemPy.startup_filename\",\n port=\"SpectroChemPy.port\",\n )\n\n flags = dict(\n debug=(\n {\"SpectroChemPy\": {\"log_level\": DEBUG}},\n \"Set log_level to DEBUG - most verbose mode\",\n ),\n info=(\n {\"SpectroChemPy\": {\"log_level\": INFO}},\n \"Set log_level to INFO - verbose mode\",\n ),\n quiet=(\n {\"SpectroChemPy\": {\"log_level\": ERROR}},\n \"Set log_level to ERROR - no verbosity at all\",\n ),\n nodisplay=(\n {\"SpectroChemPy\": {\"nodisplay\": True}},\n \"Set NO DISPLAY mode to true - no graphics at all\",\n ),\n reset_config=(\n {\"SpectroChemPy\": {\"reset_config\": True}},\n \"Reset config to default\",\n ),\n show_config=(\n {\n \"SpectroChemPy\": {\n \"show_config\": True,\n }\n },\n \"Show the application's configuration (human-readable \" \"format)\",\n ),\n show_config_json=(\n {\n \"SpectroChemPy\": {\n \"show_config_json\": True,\n }\n },\n \"Show the application's configuration (json \" \"format)\",\n ),\n )\n\n classes = List(\n [\n GeneralPreferences,\n PlotPreferences,\n DataDir,\n ]\n )\n\n # ------------------------------------------------------------------------------------------------------------------\n # Initialisation of the application\n # ------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n\n self.logs = (\n self.log\n ) # we change the no name in order to avoid latter conflict with numpy.log\n\n self.initialize()\n\n def initialize(self, argv=None):\n \"\"\"\n Initialisation function for the API applications\n\n Parameters\n ----------\n argv : List, [optional].\n List of configuration parameters.\n \"\"\"\n\n # parse the argv\n # --------------------------------------------------------------------\n\n # if we are running this under ipython and jupyter notebooks\n # deactivate potential command line arguments\n # (such that those from jupyter which cause problems here)\n\n IN_IPYTHON = False\n if InteractiveShell.initialized():\n IN_IPYTHON = True\n\n if not IN_IPYTHON:\n # remove argument not known by spectrochempy\n if \"make.py\" in sys.argv[0] or \"pytest\" in sys.argv[0]: # building docs\n options = []\n for item in sys.argv[:]:\n for k in list(self.flags.keys()):\n if item.startswith(\"--\" + k) or k in [\"--help\", \"--help-all\"]:\n options.append(item)\n continue\n for k in list(self.aliases.keys()):\n if item.startswith(\"-\" + k) or k in [\n \"h\",\n ]:\n options.append(item)\n self.parse_command_line(options)\n else: # pragma: no cover\n self.parse_command_line(sys.argv)\n\n # Get preferences from the config file and init everything\n # ---------------------------------------------------------------------\n\n self._init_all_preferences()\n\n # we catch warnings and error for a lighter display to the end-user.\n # except if we are in debugging mode\n\n # warning handler\n # --------------------------------------------------------------------\n def send_warnings_to_log(message, category):\n self.logs.warning(f\"{category.__name__} - {message}\")\n return\n\n warnings.showwarning = send_warnings_to_log\n\n # exception handler\n # --------------------------------------------------------------------\n\n if IN_IPYTHON: # pragma: no cover\n\n ip = get_ipython()\n\n def _custom_exc(shell, etype, evalue, 
tb, tb_offset=None):\n\n if self.log_level == logging.DEBUG:\n shell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)\n else:\n self.logs.error(f\"{etype.__name__}: {evalue}\")\n\n ip.set_custom_exc((Exception,), _custom_exc)\n\n # load our custom magic extensions\n # --------------------------------------------------------------------\n if ip is not None:\n ip.register_magics(SpectroChemPyMagics)\n\n def _init_all_preferences(self):\n\n # Get preferences from the config file\n # ---------------------------------------------------------------------\n\n if not self.config:\n self.config = Config()\n\n configfiles = []\n if self.config_file_name:\n config_file = self.config_dir / self.config_file_name\n configfiles.append(config_file)\n\n lis = self.config_dir.iterdir()\n for f in lis:\n if f.suffix == \".json\":\n jsonname = self.config_dir / f\n if self.reset_config or f == \"PlotPreferences.json\":\n # remove the user json file to reset to defaults\n jsonname.unlink()\n else:\n configfiles.append(jsonname)\n\n for cfgname in configfiles:\n self.load_config_file(cfgname)\n if cfgname not in self._loaded_config_files:\n self._loaded_config_files.append(cfgname)\n\n # Eventually write the default config file\n # --------------------------------------\n self._make_default_config_file()\n\n self.datadir = (\n DataDir()\n ) # config=self.config) -- passing args deprecated in traitlets 4.2\n self.preferences = GeneralPreferences(config=self.config, parent=self)\n self.plot_preferences = PlotPreferences(config=self.config, parent=self)\n\n # ..................................................................................................................\n @staticmethod\n def get_config_dir():\n \"\"\"\n Determines the SpectroChemPy configuration directory name and\n creates the directory if it doesn't exist.\n\n This directory is typically ``$HOME/.spectrochempy/config``,\n but if the\n SCP_CONFIG_HOME environment variable is set and the\n ``$SCP_CONFIG_HOME`` directory exists, it will be that\n directory.\n\n If neither exists, the former will be created.\n\n Returns\n -------\n config_dir : str\n The absolute path to the configuration directory.\n \"\"\"\n\n # first look for SCP_CONFIG_HOME\n scp = environ.get(\"SCP_CONFIG_HOME\")\n\n if scp is not None and Path(scp).exists():\n return Path(scp)\n\n config = _find_or_create_spectrochempy_dir() / \"config\"\n if not config.exists():\n config.mkdir(exist_ok=True)\n\n return config\n\n def start_show_config(self, **kwargs):\n \"\"\"start function used when show_config is True\"\"\"\n config = self.config.copy()\n # exclude show_config flags from displayed config\n for cls in self.__class__.mro():\n if cls.__name__ in config:\n cls_config = config[cls.__name__]\n cls_config.pop(\"show_config\", None)\n cls_config.pop(\"show_config_json\", None)\n\n if self.show_config_json:\n json.dump(config, sys.stdout, indent=1, sort_keys=True, default=repr)\n # add trailing newlines\n sys.stdout.write(\"\\n\")\n print()\n return self._start()\n\n if self._loaded_config_files:\n print(\"Loaded config files:\")\n for f in self._loaded_config_files:\n print(\" \" + f)\n print()\n\n for classname in sorted(config):\n class_config = config[classname]\n if not class_config:\n continue\n print(classname)\n pformat_kwargs = dict(indent=4)\n if sys.version_info >= (3, 4):\n # use compact pretty-print on Pythons that support it\n pformat_kwargs[\"compact\"] = True\n for traitname in sorted(class_config):\n value = class_config[traitname]\n print(\n \" .{} = 
{}\".format(\n traitname,\n pprint.pformat(value, **pformat_kwargs),\n )\n )\n print()\n\n # now run the actual start function\n return self._start()\n\n def reset_preferences(self):\n \"\"\"\n Reset all preferences to default\n\n \"\"\"\n self.reset_config = True\n self._init_all_preferences()\n self.reset_config = False\n\n # ------------------------------------------------------------------------------------------------------------------\n # start the application\n # ------------------------------------------------------------------------------------------------------------------\n\n def start(self):\n \"\"\"\n Start the |scpy| API\n\n All configuration must have been done before calling this function\n \"\"\"\n\n # print(f'{sys.argv}')\n\n return self._start()\n\n # ------------------------------------------------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def _start(self):\n\n if self.running:\n # API already started. Nothing done!\n return\n\n if self.preferences.show_info_on_loading:\n info_string = \"SpectroChemPy's API - v.{}\\n\" \"© Copyright {}\".format(\n __version__, __copyright__\n )\n ip = get_ipython()\n if ip is not None and \"TerminalInteractiveShell\" not in str(ip):\n display_info_string(message=info_string.strip())\n\n else:\n if \"/bin/scpy\" not in sys.argv[0]: # deactivate for console scripts\n print(info_string.strip())\n\n # force update of rcParams\n for rckey in mpl.rcParams.keys():\n key = rckey.replace(\"_\", \"__\").replace(\".\", \"_\").replace(\"-\", \"___\")\n try:\n mpl.rcParams[rckey] = getattr(self.plot_preferences, key)\n except ValueError:\n mpl.rcParams[rckey] = getattr(self.plot_preferences, key).replace(\n \"'\", \"\"\n )\n except AttributeError:\n # print(f'{e} -> you may want to add it to PlotPreferences.py')\n pass\n\n self.plot_preferences.set_latex_font(self.plot_preferences.font_family)\n\n self.running = True\n\n # display needs for update\n # time.sleep(1)\n fi = Path.home() / \".scpy_update\"\n if fi.exists():\n try:\n msg = fi.read_text()\n self.logs.warning(msg)\n except Exception:\n pass\n\n return True\n\n # ..................................................................................................................\n def _make_default_config_file(self):\n \"\"\"auto generate default config file.\"\"\"\n\n fname = self.config_dir / self.config_file_name\n fname = fname.with_suffix(\".py\")\n\n if not fname.exists() or self.reset_config:\n s = self.generate_config_file()\n self.logs.info(\"Generating default config file: %r\" % fname)\n with open(fname, \"w\") as f:\n f.write(s)\n\n # ------------------------------------------------------------------------------------------------------------------\n # Events from Application\n # ------------------------------------------------------------------------------------------------------------------\n\n @observe(\"log_level\")\n def _log_level_changed(self, change):\n\n self.log_format = \"%(message)s\"\n if change.new == DEBUG:\n self.log_format = \"[%(filename)s-%(funcName)s %(levelname)s] %(\" \"message)s\"\n self.logs._cache = {}\n self.logs.level = self.log_level\n for handler in self.logs.handlers:\n handler.level = self.log_level\n self.logs.info(\n \"changed default log_level to {}\".format(logging.getLevelName(change.new))\n )\n\n\n# 
======================================================================================================================\n\nif __name__ == \"__main__\":\n pass\n"
]
| [
[
"matplotlib.rcParams.keys",
"matplotlib.pyplot.style.use"
]
]
|
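The `application.py` above checks anaconda.org for a newer stable release by regex-scraping the files page and comparing version strings with `pkg_resources.parse_version`. The comparison itself reduces to the pattern below, shown with the maintained `packaging` API rather than the deprecated `pkg_resources` one (the function name is illustrative):

from packaging.version import parse

def newest_available(current, available):
    # Return the newest version string strictly newer than `current`, or None.
    newer = [v for v in available if parse(v) > parse(current)]
    return max(newer, key=parse) if newer else None

assert newest_available("0.2.1", ["0.1.9", "0.2.1", "0.3.0"]) == "0.3.0"
assert newest_available("0.3.0", ["0.1.9", "0.2.1", "0.3.0"]) is None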
atroyn/pytorch3d | [
"f03aa5803bcdba3b9447a8c2ffdc77e693d51c87"
]
| [
"tests/test_blending.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport unittest\n\nimport torch\nfrom common_testing import TestCaseMixin\nfrom pytorch3d.renderer.blending import (\n BlendParams,\n hard_rgb_blend,\n sigmoid_alpha_blend,\n softmax_rgb_blend,\n)\nfrom pytorch3d.renderer.mesh.rasterizer import Fragments\n\n\ndef sigmoid_blend_naive_loop(colors, fragments, blend_params):\n \"\"\"\n Naive for loop based implementation of distance based alpha calculation.\n Only for test purposes.\n \"\"\"\n pix_to_face = fragments.pix_to_face\n dists = fragments.dists\n sigma = blend_params.sigma\n\n N, H, W, K = pix_to_face.shape\n device = pix_to_face.device\n pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)\n\n for n in range(N):\n for h in range(H):\n for w in range(W):\n alpha = 1.0\n\n # Loop over k faces and calculate 2D distance based probability\n # map.\n for k in range(K):\n if pix_to_face[n, h, w, k] >= 0:\n prob = torch.sigmoid(-dists[n, h, w, k] / sigma)\n alpha *= 1.0 - prob # cumulative product\n pixel_colors[n, h, w, :3] = colors[n, h, w, 0, :]\n pixel_colors[n, h, w, 3] = 1.0 - alpha\n\n return pixel_colors\n\n\ndef sigmoid_alpha_blend_vectorized(colors, fragments, blend_params) -> torch.Tensor:\n N, H, W, K = fragments.pix_to_face.shape\n pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)\n mask = fragments.pix_to_face >= 0\n prob = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask\n pixel_colors[..., :3] = colors[..., 0, :]\n pixel_colors[..., 3] = 1.0 - torch.prod((1.0 - prob), dim=-1)\n return pixel_colors\n\n\ndef sigmoid_blend_naive_loop_backward(grad_images, images, fragments, blend_params):\n pix_to_face = fragments.pix_to_face\n dists = fragments.dists\n sigma = blend_params.sigma\n\n N, H, W, K = pix_to_face.shape\n device = pix_to_face.device\n grad_distances = torch.zeros((N, H, W, K), dtype=dists.dtype, device=device)\n\n for n in range(N):\n for h in range(H):\n for w in range(W):\n alpha = 1.0 - images[n, h, w, 3]\n grad_alpha = grad_images[n, h, w, 3]\n # Loop over k faces and calculate 2D distance based probability\n # map.\n for k in range(K):\n if pix_to_face[n, h, w, k] >= 0:\n prob = torch.sigmoid(-dists[n, h, w, k] / sigma)\n grad_distances[n, h, w, k] = (\n grad_alpha * (-1.0 / sigma) * prob * alpha\n )\n return grad_distances\n\n\ndef softmax_blend_naive(colors, fragments, blend_params):\n \"\"\"\n Naive for loop based implementation of softmax blending.\n Only for test purposes.\n \"\"\"\n pix_to_face = fragments.pix_to_face\n dists = fragments.dists\n zbuf = fragments.zbuf\n sigma = blend_params.sigma\n gamma = blend_params.gamma\n\n N, H, W, K = pix_to_face.shape\n device = pix_to_face.device\n pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)\n\n # Near and far clipping planes\n zfar = 100.0\n znear = 1.0\n eps = 1e-10\n\n bk_color = blend_params.background_color\n if not torch.is_tensor(bk_color):\n bk_color = torch.tensor(bk_color, dtype=colors.dtype, device=device)\n\n for n in range(N):\n for h in range(H):\n for w in range(W):\n alpha = 1.0\n weights_k = torch.zeros(K, device=device)\n zmax = torch.tensor(0.0, device=device)\n\n # Loop over K to find max z.\n for k in range(K):\n if pix_to_face[n, h, w, k] >= 0:\n zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)\n if zinv > zmax:\n zmax = zinv\n\n # Loop over K faces to calculate 2D distance based probability\n # map and zbuf based weights for colors.\n for k in range(K):\n if pix_to_face[n, 
h, w, k] >= 0:\n zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)\n prob = torch.sigmoid(-dists[n, h, w, k] / sigma)\n alpha *= 1.0 - prob # cumulative product\n weights_k[k] = prob * torch.exp((zinv - zmax) / gamma)\n\n # Clamp to ensure delta is never 0\n delta = torch.exp((eps - zmax) / blend_params.gamma).clamp(min=eps)\n delta = delta.to(device)\n denom = weights_k.sum() + delta\n cols = (weights_k[..., None] * colors[n, h, w, :, :]).sum(dim=0)\n pixel_colors[n, h, w, :3] = cols + delta * bk_color\n pixel_colors[n, h, w, :3] /= denom\n pixel_colors[n, h, w, 3] = 1.0 - alpha\n\n return pixel_colors\n\n\nclass TestBlending(TestCaseMixin, unittest.TestCase):\n def setUp(self) -> None:\n torch.manual_seed(42)\n\n def _compare_impls(\n self, fn1, fn2, args1, args2, grad_var1=None, grad_var2=None, compare_grads=True\n ):\n out1 = fn1(*args1)\n out2 = fn2(*args2)\n self.assertClose(out1.cpu()[..., 3], out2.cpu()[..., 3], atol=1e-7)\n\n # Check gradients\n if not compare_grads:\n return\n\n grad_out = torch.randn_like(out1)\n (out1 * grad_out).sum().backward()\n self.assertTrue(hasattr(grad_var1, \"grad\"))\n\n (out2 * grad_out).sum().backward()\n self.assertTrue(hasattr(grad_var2, \"grad\"))\n\n self.assertClose(grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5)\n\n def test_hard_rgb_blend(self):\n N, H, W, K = 5, 10, 10, 20\n pix_to_face = torch.randint(low=-1, high=100, size=(N, H, W, K))\n bary_coords = torch.ones((N, H, W, K, 3))\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=bary_coords,\n zbuf=pix_to_face, # dummy\n dists=pix_to_face, # dummy\n )\n colors = torch.randn((N, H, W, K, 3))\n blend_params = BlendParams(1e-4, 1e-4, (0.5, 0.5, 1))\n images = hard_rgb_blend(colors, fragments, blend_params)\n\n # Examine if the foreground colors are correct.\n is_foreground = pix_to_face[..., 0] >= 0\n self.assertClose(images[is_foreground][:, :3], colors[is_foreground][..., 0, :])\n\n # Examine if the background colors are correct.\n for i in range(3): # i.e. 
RGB\n channel_color = blend_params.background_color[i]\n self.assertTrue(images[~is_foreground][..., i].eq(channel_color).all())\n\n # Examine the alpha channel is correct\n self.assertTrue(images[..., 3].eq(1).all())\n\n def test_sigmoid_alpha_blend_manual_gradients(self):\n # Create dummy outputs of rasterization\n torch.manual_seed(231)\n F = 32 # number of faces in the mesh\n # The python loop version is really slow so only using small input sizes.\n N, S, K = 2, 3, 2\n device = torch.device(\"cuda\")\n pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1\n colors = torch.randn((N, S, S, K, 3), device=device)\n empty = torch.tensor([], device=device)\n\n # # randomly flip the sign of the distance\n # # (-) means inside triangle, (+) means outside triangle.\n random_sign_flip = torch.rand((N, S, S, K))\n random_sign_flip[random_sign_flip > 0.5] *= -1.0\n dists = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=empty, # dummy\n dists=dists,\n )\n blend_params = BlendParams(sigma=1e-3)\n pix_cols = sigmoid_blend_naive_loop(colors, fragments, blend_params)\n grad_out = torch.randn_like(pix_cols)\n\n # Backward pass\n pix_cols.backward(grad_out)\n grad_dists = sigmoid_blend_naive_loop_backward(\n grad_out, pix_cols, fragments, blend_params\n )\n self.assertTrue(torch.allclose(dists.grad, grad_dists, atol=1e-7))\n\n def test_sigmoid_alpha_blend_python(self):\n \"\"\"\n Test outputs of python tensorised function and python loop\n \"\"\"\n\n # Create dummy outputs of rasterization\n torch.manual_seed(231)\n F = 32 # number of faces in the mesh\n # The python loop version is really slow so only using small input sizes.\n N, S, K = 1, 4, 1\n device = torch.device(\"cuda\")\n pix_to_face = torch.randint(low=-1, high=F, size=(N, S, S, K), device=device)\n colors = torch.randn((N, S, S, K, 3), device=device)\n empty = torch.tensor([], device=device)\n\n dists1 = torch.randn(size=(N, S, S, K), device=device)\n dists2 = dists1.clone()\n dists1.requires_grad = True\n dists2.requires_grad = True\n\n fragments1 = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=empty, # dummy\n dists=dists1,\n )\n fragments2 = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=empty, # dummy\n dists=dists2,\n )\n\n blend_params = BlendParams(sigma=1e-2)\n args1 = (colors, fragments1, blend_params)\n args2 = (colors, fragments2, blend_params)\n\n self._compare_impls(\n sigmoid_alpha_blend,\n sigmoid_alpha_blend_vectorized,\n args1,\n args2,\n dists1,\n dists2,\n compare_grads=True,\n )\n\n def test_softmax_rgb_blend(self):\n # Create dummy outputs of rasterization simulating a cube in the center\n # of the image with surrounding padded values.\n N, S, K = 1, 8, 2\n device = torch.device(\"cuda\")\n pix_to_face = torch.full(\n (N, S, S, K), fill_value=-1, dtype=torch.int64, device=device\n )\n h = int(S / 2)\n pix_to_face_full = torch.randint(\n size=(N, h, h, K), low=0, high=100, device=device\n )\n s = int(S / 4)\n e = int(0.75 * S)\n pix_to_face[:, s:e, s:e, :] = pix_to_face_full\n empty = torch.tensor([], device=device)\n\n random_sign_flip = torch.rand((N, S, S, K), device=device)\n random_sign_flip[random_sign_flip > 0.5] *= -1.0\n zbuf1 = torch.randn(size=(N, S, S, K), device=device)\n\n # randomly flip the sign of the distance\n # (-) means inside triangle, (+) means outside triangle.\n dists1 = torch.randn(size=(N, S, S, K), device=device) * 
random_sign_flip\n dists2 = dists1.clone()\n zbuf2 = zbuf1.clone()\n dists1.requires_grad = True\n dists2.requires_grad = True\n colors = torch.randn((N, S, S, K, 3), device=device)\n fragments1 = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=zbuf1,\n dists=dists1,\n )\n fragments2 = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=zbuf2,\n dists=dists2,\n )\n\n blend_params = BlendParams(sigma=1e-3)\n args1 = (colors, fragments1, blend_params)\n args2 = (colors, fragments2, blend_params)\n self._compare_impls(\n softmax_rgb_blend,\n softmax_blend_naive,\n args1,\n args2,\n dists1,\n dists2,\n compare_grads=True,\n )\n\n @staticmethod\n def bm_sigmoid_alpha_blending(\n num_meshes: int = 16,\n image_size: int = 128,\n faces_per_pixel: int = 100,\n device=\"cuda\",\n backend: str = \"pytorch\",\n ):\n device = torch.device(device)\n torch.manual_seed(231)\n\n # Create dummy outputs of rasterization\n N, S, K = num_meshes, image_size, faces_per_pixel\n F = 32 # num faces in the mesh\n pix_to_face = torch.randint(\n low=-1, high=F + 1, size=(N, S, S, K), device=device\n )\n colors = torch.randn((N, S, S, K, 3), device=device)\n empty = torch.tensor([], device=device)\n\n dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=empty, # dummy\n zbuf=empty, # dummy\n dists=dists1,\n )\n blend_params = BlendParams(sigma=1e-3)\n\n blend_fn = (\n sigmoid_alpha_blend_vectorized\n if backend == \"pytorch\"\n else sigmoid_alpha_blend\n )\n\n torch.cuda.synchronize()\n\n def fn():\n # test forward and backward pass\n images = blend_fn(colors, fragments, blend_params)\n images.sum().backward()\n torch.cuda.synchronize()\n\n return fn\n\n @staticmethod\n def bm_softmax_blending(\n num_meshes: int = 16,\n image_size: int = 128,\n faces_per_pixel: int = 100,\n device: str = \"cpu\",\n backend: str = \"pytorch\",\n ):\n if torch.cuda.is_available() and \"cuda:\" in device:\n # If a device other than the default is used, set the device explicity.\n torch.cuda.set_device(device)\n\n device = torch.device(device)\n torch.manual_seed(231)\n\n # Create dummy outputs of rasterization\n N, S, K = num_meshes, image_size, faces_per_pixel\n F = 32 # num faces in the mesh\n pix_to_face = torch.randint(\n low=-1, high=F + 1, size=(N, S, S, K), device=device\n )\n colors = torch.randn((N, S, S, K, 3), device=device)\n empty = torch.tensor([], device=device)\n\n dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)\n zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)\n fragments = Fragments(\n pix_to_face=pix_to_face, bary_coords=empty, zbuf=zbuf, dists=dists1 # dummy\n )\n blend_params = BlendParams(sigma=1e-3)\n\n torch.cuda.synchronize()\n\n def fn():\n # test forward and backward pass\n images = softmax_rgb_blend(colors, fragments, blend_params)\n images.sum().backward()\n torch.cuda.synchronize()\n\n return fn\n\n def test_blend_params(self):\n \"\"\"Test color parameter of BlendParams().\n Assert passed value overrides default value.\n \"\"\"\n bp_default = BlendParams()\n bp_new = BlendParams(background_color=(0.5, 0.5, 0.5))\n self.assertEqual(bp_new.background_color, (0.5, 0.5, 0.5))\n self.assertEqual(bp_default.background_color, (1.0, 1.0, 1.0))\n"
]
| [
[
"torch.zeros",
"torch.sigmoid",
"torch.device",
"torch.prod",
"torch.rand",
"torch.cuda.synchronize",
"torch.is_tensor",
"torch.ones",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.randn_like",
"torch.randint",
"torch.tensor",
"torch.full",
"torch.cuda.is_available",
"torch.allclose",
"torch.exp",
"torch.randn"
]
]
|
timoi-Lucypher/npCrypto | [
"10156482d70503fa01880421aba4a4a3d171bd98"
]
| [
"benchmark.py"
]
| [
"import time\nimport numpy as np\nfrom Crypto.Cipher import AES as pyAES\n\nfrom aes import AES\n\ndef benchmark_encryption(N):\n key = np.array([\n [0x2b, 0x7e, 0x15, 0x16],\n [0x28, 0xae, 0xd2, 0xa6],\n [0xab, 0xf7, 0x15, 0x88],\n [0x09, 0xcf, 0x4f, 0x3c]\n ], dtype=np.uint8)\n ref_aes = pyAES.new(key.tobytes(), pyAES.MODE_ECB)\n my_aes = AES(key)\n # Generate numpy plaintexts\n pts = np.random.randint(0, 256,\n size=(N, 4, 4), dtype=np.uint8)\n # Translate to bytes\n pts_bytes = []\n for pt in pts:\n pts_bytes.append(pt.tobytes())\n # Compare timings\n start = time.time() \n for pt in pts_bytes:\n ref_aes.encrypt(pt)\n duration_ref = time.time() - start\n print(f\"Reference time: {duration_ref}\")\n start = time.time()\n my_aes.encrypt(pts)\n duration = time.time() - start\n print(f\"Numpy aes time: {duration}\")\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Numpy AES benchmark.')\n parser.add_argument('--N', dest='N',\n default=1000,\n type=int,\n help='Number of random plaintexts to encrypt')\n\n args = parser.parse_args()\n\n benchmark_encryption(args.N)\n"
]
| [
[
"numpy.array",
"numpy.random.randint"
]
]
|
FlankMe/numpy-neural-net | [
"28de2324377e389fcdcb701979ff0387b9334e75"
]
| [
"_optimiser.py"
]
| [
"import numpy as np\r\nimport _supportingFunctions as sfs\r\n\r\n\"\"\"\r\nMain optimisation step\r\n\"\"\"\r\ndef optimisationStep(net, perturbation, method='SGD', \r\n LEARNINGRATE=3e-4, L2PENALTY=1e-4, DROPOUT=False): \r\n\r\n activation = net.prediction.pop()\r\n \r\n for layer in reversed(net.graph):\r\n\r\n perturbation, activation, weightsGradient, biasGradient = layer.back(perturbation, activation)\r\n\r\n if weightsGradient is None or biasGradient is None:\r\n continue\r\n \r\n effectiveLearningRate = LEARNINGRATE \r\n if method == 'Adam':\r\n # https://arxiv.org/pdf/1412.6980.pdf\r\n weightsGradient, biasGradient, LearningRateAdj = (\r\n AdamUpdateGradient(net, layer, weightsGradient, biasGradient) )\r\n effectiveLearningRate *= LearningRateAdj\r\n \r\n weightsGradient = sfs.clipValues(weightsGradient) \r\n biasGradient = sfs.clipValues(biasGradient) \r\n\r\n layer.Weights -= effectiveLearningRate * weightsGradient\r\n layer.bias -= effectiveLearningRate * biasGradient\r\n\r\n layer.Weights *= 1 - L2PENALTY\r\n layer.bias *= 1 - L2PENALTY\r\n\r\n\r\n\r\n\"\"\" \r\nAdam optimiser\r\nAlgorithm from paper: https://arxiv.org/pdf/1412.6980.pdf\r\n\"\"\"\r\ndef AdamUpdateGradient(net, layer, weightsGradient, biasGradient):\r\n\r\n BETA1, BETA2, EPSILON = 0.9, 0.999, 1e-8\r\n currentBeta1, currentBeta2 = net.AdamParameters\r\n \r\n weightsMomentum, biasMomentum = layer.AdamMomentum\r\n weightsVariance, biasVariance = layer.AdamVariance\r\n\r\n weightsMomentum = BETA1 * weightsMomentum + (1 - BETA1) * weightsGradient\r\n biasMomentum = BETA1 * biasMomentum + (1 - BETA1) * biasGradient\r\n\r\n weightsVariance = BETA2 * weightsVariance + (1 - BETA2) * (weightsGradient ** 2)\r\n biasVariance = BETA2 * biasVariance + (1 - BETA2) * (biasGradient ** 2)\r\n\r\n weightsGradient = weightsMomentum / (np.sqrt(weightsVariance) + EPSILON)\r\n biasGradient = biasMomentum / (np.sqrt(biasVariance) + EPSILON)\r\n\r\n layer.AdamMomentum = weightsMomentum, biasMomentum \r\n layer.AdamVariance = weightsVariance, biasVariance \r\n \r\n LearningRateAdj = np.sqrt(1 - currentBeta2) / (1 - currentBeta1)\r\n\r\n if layer == net.graph[0]:\r\n net.AdamParameters = [currentBeta1 * BETA1, currentBeta2 * BETA2]\r\n \r\n return weightsGradient, biasGradient, LearningRateAdj\r\n\r\n\r\ndef AdamInitialise(net):\r\n \r\n # Initialise variables possibly used by Adam optimiser \r\n currentBETA1, currentBETA2 = 0.9, 0.999\r\n for layer in net.graph:\r\n try:\r\n layer.AdamMomentum = [np.zeros(layer.Weights.shape), np.zeros(layer.bias.shape)]\r\n layer.AdamVariance = [np.zeros(layer.Weights.shape), np.zeros(layer.bias.shape)]\r\n except:\r\n continue\r\n net.AdamParameters = [currentBETA1, currentBETA2]\r\n return (net)\r\n \r\n"
]
| [
[
"numpy.zeros",
"numpy.sqrt"
]
]
|
gpzlx1/dgl | [
"f0fafa2062ccb23bfb996e84aa4758a435db9b1f"
]
| [
"tests/distributed/test_dist_graph_store.py"
]
| [
"import os\nos.environ['OMP_NUM_THREADS'] = '1'\nimport dgl\nimport sys\nimport numpy as np\nimport time\nimport socket\nfrom scipy import sparse as spsp\nfrom numpy.testing import assert_array_equal\nfrom multiprocessing import Process, Manager, Condition, Value\nimport multiprocessing as mp\nfrom dgl.heterograph_index import create_unitgraph_from_coo\nfrom dgl.data.utils import load_graphs, save_graphs\nfrom dgl.distributed import DistGraphServer, DistGraph\nfrom dgl.distributed import partition_graph, load_partition, load_partition_book, node_split, edge_split\nfrom numpy.testing import assert_almost_equal\nimport backend as F\nimport math\nimport unittest\nimport pickle\n\nif os.name != 'nt':\n import fcntl\n import struct\n\ndef get_local_usable_addr():\n \"\"\"Get local usable IP and port\n\n Returns\n -------\n str\n IP address, e.g., '192.168.8.12:50051'\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('10.255.255.255', 1))\n ip_addr = sock.getsockname()[0]\n except ValueError:\n ip_addr = '127.0.0.1'\n finally:\n sock.close()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((\"\", 0))\n sock.listen(1)\n port = sock.getsockname()[1]\n sock.close()\n\n return ip_addr + ' ' + str(port)\n\ndef create_random_graph(n):\n arr = (spsp.random(n, n, density=0.001, format='coo', random_state=100) != 0).astype(np.int64)\n return dgl.from_scipy(arr)\n\ndef run_server(graph_name, server_id, server_count, num_clients, shared_mem):\n g = DistGraphServer(server_id, \"kv_ip_config.txt\", server_count, num_clients,\n '/tmp/dist_graph/{}.json'.format(graph_name),\n disable_shared_mem=not shared_mem)\n print('start server', server_id)\n g.start()\n\ndef emb_init(shape, dtype):\n return F.zeros(shape, dtype, F.cpu())\n\ndef rand_init(shape, dtype):\n return F.tensor(np.random.normal(size=shape), F.float32)\n\ndef check_dist_graph_empty(g, num_clients, num_nodes, num_edges):\n # Test API\n assert g.number_of_nodes() == num_nodes\n assert g.number_of_edges() == num_edges\n\n # Test init node data\n new_shape = (g.number_of_nodes(), 2)\n g.ndata['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)\n nids = F.arange(0, int(g.number_of_nodes() / 2))\n feats = g.ndata['test1'][nids]\n assert np.all(F.asnumpy(feats) == 0)\n\n # create a tensor and destroy a tensor and create it again.\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)\n del test3\n test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')\n del test3\n\n # Test write data\n new_feats = F.ones((len(nids), 2), F.int32, F.cpu())\n g.ndata['test1'][nids] = new_feats\n feats = g.ndata['test1'][nids]\n assert np.all(F.asnumpy(feats) == 1)\n\n # Test metadata operations.\n assert g.node_attr_schemes()['test1'].dtype == F.int32\n\n print('end')\n\ndef run_client_empty(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):\n time.sleep(5)\n os.environ['DGL_NUM_SERVER'] = str(server_count)\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),\n part_id, None)\n g = DistGraph(graph_name, gpb=gpb)\n check_dist_graph_empty(g, num_clients, num_nodes, num_edges)\n\ndef check_server_client_empty(shared_mem, num_servers, num_clients):\n prepare_dist()\n g = create_random_graph(10000)\n\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_1'\n partition_graph(g, graph_name, 
num_parts, '/tmp/dist_graph')\n\n # let's just test on one partition for now.\n # We cannot run multiple servers and clients on the same machine.\n serv_ps = []\n ctx = mp.get_context('spawn')\n for serv_id in range(num_servers):\n p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,\n num_clients, shared_mem))\n serv_ps.append(p)\n p.start()\n\n cli_ps = []\n for cli_id in range(num_clients):\n print('start client', cli_id)\n p = ctx.Process(target=run_client_empty, args=(graph_name, 0, num_servers, num_clients,\n g.number_of_nodes(), g.number_of_edges()))\n p.start()\n cli_ps.append(p)\n\n for p in cli_ps:\n p.join()\n\n for p in serv_ps:\n p.join()\n\n print('clients have terminated')\n\ndef run_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):\n time.sleep(5)\n os.environ['DGL_NUM_SERVER'] = str(server_count)\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),\n part_id, None)\n g = DistGraph(graph_name, gpb=gpb)\n check_dist_graph(g, num_clients, num_nodes, num_edges)\n\ndef run_emb_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):\n time.sleep(5)\n os.environ['DGL_NUM_SERVER'] = str(server_count)\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),\n part_id, None)\n g = DistGraph(graph_name, gpb=gpb)\n check_dist_emb(g, num_clients, num_nodes, num_edges)\n\ndef run_client_hierarchy(graph_name, part_id, server_count, node_mask, edge_mask, return_dict):\n time.sleep(5)\n os.environ['DGL_NUM_SERVER'] = str(server_count)\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),\n part_id, None)\n g = DistGraph(graph_name, gpb=gpb)\n node_mask = F.tensor(node_mask)\n edge_mask = F.tensor(edge_mask)\n nodes = node_split(node_mask, g.get_partition_book(), node_trainer_ids=g.ndata['trainer_id'])\n edges = edge_split(edge_mask, g.get_partition_book(), edge_trainer_ids=g.edata['trainer_id'])\n rank = g.rank()\n return_dict[rank] = (nodes, edges)\n\ndef check_dist_emb(g, num_clients, num_nodes, num_edges):\n from dgl.distributed.optim import SparseAdagrad\n from dgl.distributed.nn import NodeEmbedding\n # Test sparse emb\n try:\n emb = NodeEmbedding(g.number_of_nodes(), 1, 'emb1', emb_init)\n nids = F.arange(0, int(g.number_of_nodes()))\n lr = 0.001\n optimizer = SparseAdagrad([emb], lr=lr)\n with F.record_grad():\n feats = emb(nids)\n assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))\n loss = F.sum(feats + 1, 0)\n loss.backward()\n optimizer.step()\n feats = emb(nids)\n if num_clients == 1:\n assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)\n rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))\n feats1 = emb(rest)\n assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))\n\n policy = dgl.distributed.PartitionPolicy('node', g.get_partition_book())\n grad_sum = dgl.distributed.DistTensor((g.number_of_nodes(), 1), F.float32,\n 'emb1_sum', policy)\n if num_clients == 1:\n assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)) * num_clients)\n assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))\n\n emb = NodeEmbedding(g.number_of_nodes(), 1, 'emb2', emb_init)\n with F.no_grad():\n feats1 = emb(nids)\n assert np.all(F.asnumpy(feats1) == 0)\n\n optimizer = SparseAdagrad([emb], 
lr=lr)\n with F.record_grad():\n feats1 = emb(nids)\n feats2 = emb(nids)\n feats = F.cat([feats1, feats2], 0)\n assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))\n loss = F.sum(feats + 1, 0)\n loss.backward()\n optimizer.step()\n with F.no_grad():\n feats = emb(nids)\n if num_clients == 1:\n assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * 1 * -lr)\n rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))\n feats1 = emb(rest)\n assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))\n except NotImplementedError as e:\n pass\n except Exception as e:\n print(e)\n sys.exit(-1)\n\ndef check_dist_graph(g, num_clients, num_nodes, num_edges):\n # Test API\n assert g.number_of_nodes() == num_nodes\n assert g.number_of_edges() == num_edges\n\n # Test reading node data\n nids = F.arange(0, int(g.number_of_nodes() / 2))\n feats1 = g.ndata['features'][nids]\n feats = F.squeeze(feats1, 1)\n assert np.all(F.asnumpy(feats == nids))\n\n # Test reading edge data\n eids = F.arange(0, int(g.number_of_edges() / 2))\n feats1 = g.edata['features'][eids]\n feats = F.squeeze(feats1, 1)\n assert np.all(F.asnumpy(feats == eids))\n\n # Test init node data\n new_shape = (g.number_of_nodes(), 2)\n g.ndata['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)\n feats = g.ndata['test1'][nids]\n assert np.all(F.asnumpy(feats) == 0)\n\n # reference to a one that exists\n test2 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2', init_func=rand_init)\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2')\n assert np.all(F.asnumpy(test2[nids]) == F.asnumpy(test3[nids]))\n\n # create a tensor and destroy a tensor and create it again.\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)\n del test3\n test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')\n del test3\n\n # add tests for anonymous distributed tensor.\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n data = test3[0:10]\n test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n del test3\n test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n assert np.sum(F.asnumpy(test5[0:10] != data)) > 0\n\n # test a persistent tesnor\n test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,\n persistent=True)\n del test4\n try:\n test4 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test4')\n raise Exception('')\n except:\n pass\n\n # Test write data\n new_feats = F.ones((len(nids), 2), F.int32, F.cpu())\n g.ndata['test1'][nids] = new_feats\n feats = g.ndata['test1'][nids]\n assert np.all(F.asnumpy(feats) == 1)\n\n # Test metadata operations.\n assert len(g.ndata['features']) == g.number_of_nodes()\n assert g.ndata['features'].shape == (g.number_of_nodes(), 1)\n assert g.ndata['features'].dtype == F.int64\n assert g.node_attr_schemes()['features'].dtype == F.int64\n assert g.node_attr_schemes()['test1'].dtype == F.int32\n assert g.node_attr_schemes()['features'].shape == (1,)\n\n selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30\n # Test node split\n nodes = node_split(selected_nodes, g.get_partition_book())\n nodes = F.asnumpy(nodes)\n # We only have one partition, so the local nodes are basically all nodes in the graph.\n local_nids = np.arange(g.number_of_nodes())\n for n in nodes:\n assert n in local_nids\n\n print('end')\n\ndef check_dist_emb_server_client(shared_mem, num_servers, 
num_clients):\n prepare_dist()\n g = create_random_graph(10000)\n\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_2'\n g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)\n g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')\n\n # let's just test on one partition for now.\n # We cannot run multiple servers and clients on the same machine.\n serv_ps = []\n ctx = mp.get_context('spawn')\n for serv_id in range(num_servers):\n p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,\n num_clients, shared_mem))\n serv_ps.append(p)\n p.start()\n\n cli_ps = []\n for cli_id in range(num_clients):\n print('start client', cli_id)\n p = ctx.Process(target=run_emb_client, args=(graph_name, 0, num_servers, num_clients,\n g.number_of_nodes(),\n g.number_of_edges()))\n p.start()\n cli_ps.append(p)\n\n for p in cli_ps:\n p.join()\n assert p.exitcode == 0\n\n for p in serv_ps:\n p.join()\n\n print('clients have terminated')\n\ndef check_server_client(shared_mem, num_servers, num_clients):\n prepare_dist()\n g = create_random_graph(10000)\n\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_2'\n g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)\n g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')\n\n # let's just test on one partition for now.\n # We cannot run multiple servers and clients on the same machine.\n serv_ps = []\n ctx = mp.get_context('spawn')\n for serv_id in range(num_servers):\n p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,\n num_clients, shared_mem))\n serv_ps.append(p)\n p.start()\n\n cli_ps = []\n for cli_id in range(num_clients):\n print('start client', cli_id)\n p = ctx.Process(target=run_client, args=(graph_name, 0, num_servers, num_clients, g.number_of_nodes(),\n g.number_of_edges()))\n p.start()\n cli_ps.append(p)\n\n for p in cli_ps:\n p.join()\n\n for p in serv_ps:\n p.join()\n\n print('clients have terminated')\n\ndef check_server_client_hierarchy(shared_mem, num_servers, num_clients):\n prepare_dist()\n g = create_random_graph(10000)\n\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_2'\n g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)\n g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph', num_trainers_per_machine=num_clients)\n\n # let's just test on one partition for now.\n # We cannot run multiple servers and clients on the same machine.\n serv_ps = []\n ctx = mp.get_context('spawn')\n for serv_id in range(num_servers):\n p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,\n num_clients, shared_mem))\n serv_ps.append(p)\n p.start()\n\n cli_ps = []\n manager = mp.Manager()\n return_dict = manager.dict()\n node_mask = np.zeros((g.number_of_nodes(),), np.int32)\n edge_mask = np.zeros((g.number_of_edges(),), np.int32)\n nodes = np.random.choice(g.number_of_nodes(), g.number_of_nodes() // 10, replace=False)\n edges = np.random.choice(g.number_of_edges(), g.number_of_edges() // 10, replace=False)\n node_mask[nodes] = 1\n edge_mask[edges] = 1\n nodes = np.sort(nodes)\n edges = np.sort(edges)\n for cli_id in range(num_clients):\n print('start client', cli_id)\n p = ctx.Process(target=run_client_hierarchy, args=(graph_name, 0, num_servers,\n 
node_mask, edge_mask, return_dict))\n p.start()\n cli_ps.append(p)\n\n for p in cli_ps:\n p.join()\n for p in serv_ps:\n p.join()\n\n nodes1 = []\n edges1 = []\n for n, e in return_dict.values():\n nodes1.append(n)\n edges1.append(e)\n nodes1, _ = F.sort_1d(F.cat(nodes1, 0))\n edges1, _ = F.sort_1d(F.cat(edges1, 0))\n assert np.all(F.asnumpy(nodes1) == nodes)\n assert np.all(F.asnumpy(edges1) == edges)\n\n print('clients have terminated')\n\n\ndef run_client_hetero(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):\n time.sleep(5)\n os.environ['DGL_NUM_SERVER'] = str(server_count)\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),\n part_id, None)\n g = DistGraph(graph_name, gpb=gpb)\n check_dist_graph_hetero(g, num_clients, num_nodes, num_edges)\n\ndef create_random_hetero():\n num_nodes = {'n1': 10000, 'n2': 10010, 'n3': 10020}\n etypes = [('n1', 'r1', 'n2'),\n ('n1', 'r2', 'n3'),\n ('n2', 'r3', 'n3')]\n edges = {}\n for etype in etypes:\n src_ntype, _, dst_ntype = etype\n arr = spsp.random(num_nodes[src_ntype], num_nodes[dst_ntype], density=0.001, format='coo',\n random_state=100)\n edges[etype] = (arr.row, arr.col)\n g = dgl.heterograph(edges, num_nodes)\n g.nodes['n1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_nodes('n1')), 1)\n g.edges['r1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_edges('r1')), 1)\n return g\n\ndef check_dist_graph_hetero(g, num_clients, num_nodes, num_edges):\n # Test API\n for ntype in num_nodes:\n assert ntype in g.ntypes\n assert num_nodes[ntype] == g.number_of_nodes(ntype)\n for etype in num_edges:\n assert etype in g.etypes\n assert num_edges[etype] == g.number_of_edges(etype)\n assert g.number_of_nodes() == sum([num_nodes[ntype] for ntype in num_nodes])\n assert g.number_of_edges() == sum([num_edges[etype] for etype in num_edges])\n\n # Test reading node data\n nids = F.arange(0, int(g.number_of_nodes('n1') / 2))\n feats1 = g.nodes['n1'].data['feat'][nids]\n feats = F.squeeze(feats1, 1)\n assert np.all(F.asnumpy(feats == nids))\n\n # Test reading edge data\n eids = F.arange(0, int(g.number_of_edges('r1') / 2))\n feats1 = g.edges['r1'].data['feat'][eids]\n feats = F.squeeze(feats1, 1)\n assert np.all(F.asnumpy(feats == eids))\n\n # Test init node data\n new_shape = (g.number_of_nodes('n1'), 2)\n g.nodes['n1'].data['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)\n feats = g.nodes['n1'].data['test1'][nids]\n assert np.all(F.asnumpy(feats) == 0)\n\n # create a tensor and destroy a tensor and create it again.\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)\n del test3\n test3 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test3')\n del test3\n\n # add tests for anonymous distributed tensor.\n test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n data = test3[0:10]\n test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n del test3\n test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)\n assert np.sum(F.asnumpy(test5[0:10] != data)) > 0\n\n # test a persistent tesnor\n test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,\n persistent=True)\n del test4\n try:\n test4 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test4')\n raise Exception('')\n except:\n pass\n\n # Test write data\n new_feats = F.ones((len(nids), 2), F.int32, 
F.cpu())\n g.nodes['n1'].data['test1'][nids] = new_feats\n feats = g.nodes['n1'].data['test1'][nids]\n assert np.all(F.asnumpy(feats) == 1)\n\n # Test metadata operations.\n assert len(g.nodes['n1'].data['feat']) == g.number_of_nodes('n1')\n assert g.nodes['n1'].data['feat'].shape == (g.number_of_nodes('n1'), 1)\n assert g.nodes['n1'].data['feat'].dtype == F.int64\n\n selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes('n1')) > 30\n # Test node split\n nodes = node_split(selected_nodes, g.get_partition_book(), ntype='n1')\n nodes = F.asnumpy(nodes)\n # We only have one partition, so the local nodes are basically all nodes in the graph.\n local_nids = np.arange(g.number_of_nodes('n1'))\n for n in nodes:\n assert n in local_nids\n\n print('end')\n\ndef check_server_client_hetero(shared_mem, num_servers, num_clients):\n prepare_dist()\n g = create_random_hetero()\n\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_3'\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')\n\n # let's just test on one partition for now.\n # We cannot run multiple servers and clients on the same machine.\n serv_ps = []\n ctx = mp.get_context('spawn')\n for serv_id in range(num_servers):\n p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,\n num_clients, shared_mem))\n serv_ps.append(p)\n p.start()\n\n cli_ps = []\n num_nodes = {ntype: g.number_of_nodes(ntype) for ntype in g.ntypes}\n num_edges = {etype: g.number_of_edges(etype) for etype in g.etypes}\n for cli_id in range(num_clients):\n print('start client', cli_id)\n p = ctx.Process(target=run_client_hetero, args=(graph_name, 0, num_servers, num_clients, num_nodes,\n num_edges))\n p.start()\n cli_ps.append(p)\n\n for p in cli_ps:\n p.join()\n\n for p in serv_ps:\n p.join()\n\n print('clients have terminated')\n\[email protected](os.name == 'nt', reason='Do not support windows yet')\[email protected](dgl.backend.backend_name == \"tensorflow\", reason=\"TF doesn't support some of operations in DistGraph\")\ndef test_server_client():\n os.environ['DGL_DIST_MODE'] = 'distributed'\n check_server_client_hierarchy(False, 1, 4)\n check_server_client_empty(True, 1, 1)\n check_server_client_hetero(True, 1, 1)\n check_server_client_hetero(False, 1, 1)\n check_server_client(True, 1, 1)\n check_server_client(False, 1, 1)\n check_server_client(True, 2, 2)\n check_server_client(False, 2, 2)\n\[email protected](os.name == 'nt', reason='Do not support windows yet')\[email protected](dgl.backend.backend_name == \"tensorflow\", reason=\"TF doesn't support distributed NodeEmbedding\")\[email protected](dgl.backend.backend_name == \"mxnet\", reason=\"Mxnet doesn't support distributed NodeEmbedding\")\ndef test_dist_emb_server_client():\n os.environ['DGL_DIST_MODE'] = 'distributed'\n check_dist_emb_server_client(True, 1, 1)\n check_dist_emb_server_client(False, 1, 1)\n check_dist_emb_server_client(True, 2, 2)\n\[email protected](dgl.backend.backend_name == \"tensorflow\", reason=\"TF doesn't support some of operations in DistGraph\")\ndef test_standalone():\n os.environ['DGL_DIST_MODE'] = 'standalone'\n\n g = create_random_graph(10000)\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_3'\n g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)\n g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')\n\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n dist_g = DistGraph(graph_name, 
part_config='/tmp/dist_graph/{}.json'.format(graph_name))\n try:\n check_dist_graph(dist_g, 1, g.number_of_nodes(), g.number_of_edges())\n except Exception as e:\n print(e)\n dgl.distributed.exit_client() # this is needed since there's two test here in one process\n\[email protected](dgl.backend.backend_name == \"tensorflow\", reason=\"TF doesn't support distributed NodeEmbedding\")\[email protected](dgl.backend.backend_name == \"mxnet\", reason=\"Mxnet doesn't support distributed NodeEmbedding\")\ndef test_standalone_node_emb():\n os.environ['DGL_DIST_MODE'] = 'standalone'\n\n g = create_random_graph(10000)\n # Partition the graph\n num_parts = 1\n graph_name = 'dist_graph_test_3'\n g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)\n g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)\n partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')\n\n dgl.distributed.initialize(\"kv_ip_config.txt\")\n dist_g = DistGraph(graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))\n try:\n check_dist_emb(dist_g, 1, g.number_of_nodes(), g.number_of_edges())\n except Exception as e:\n print(e)\n dgl.distributed.exit_client() # this is needed since there's two test here in one process\n\[email protected](os.name == 'nt', reason='Do not support windows yet')\ndef test_split():\n #prepare_dist()\n g = create_random_graph(10000)\n num_parts = 4\n num_hops = 2\n partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')\n\n node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30\n edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30\n selected_nodes = np.nonzero(node_mask)[0]\n selected_edges = np.nonzero(edge_mask)[0]\n\n # The code now collects the roles of all client processes and use the information\n # to determine how to split the workloads. 
Here is to simulate the multi-client\n # use case.\n def set_roles(num_clients):\n dgl.distributed.role.CUR_ROLE = 'default'\n dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}\n dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}\n\n for i in range(num_parts):\n set_roles(num_parts)\n part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)\n local_nids = F.nonzero_1d(part_g.ndata['inner_node'])\n local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)\n nodes1 = np.intersect1d(selected_nodes, F.asnumpy(local_nids))\n nodes2 = node_split(node_mask, gpb, rank=i, force_even=False)\n assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes2)))\n local_nids = F.asnumpy(local_nids)\n for n in nodes1:\n assert n in local_nids\n\n set_roles(num_parts * 2)\n nodes3 = node_split(node_mask, gpb, rank=i * 2, force_even=False)\n nodes4 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=False)\n nodes5 = F.cat([nodes3, nodes4], 0)\n assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes5)))\n\n set_roles(num_parts)\n local_eids = F.nonzero_1d(part_g.edata['inner_edge'])\n local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)\n edges1 = np.intersect1d(selected_edges, F.asnumpy(local_eids))\n edges2 = edge_split(edge_mask, gpb, rank=i, force_even=False)\n assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges2)))\n local_eids = F.asnumpy(local_eids)\n for e in edges1:\n assert e in local_eids\n\n set_roles(num_parts * 2)\n edges3 = edge_split(edge_mask, gpb, rank=i * 2, force_even=False)\n edges4 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=False)\n edges5 = F.cat([edges3, edges4], 0)\n assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges5)))\n\[email protected](os.name == 'nt', reason='Do not support windows yet')\ndef test_split_even():\n #prepare_dist(1)\n g = create_random_graph(10000)\n num_parts = 4\n num_hops = 2\n partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')\n\n node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30\n edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30\n selected_nodes = np.nonzero(node_mask)[0]\n selected_edges = np.nonzero(edge_mask)[0]\n all_nodes1 = []\n all_nodes2 = []\n all_edges1 = []\n all_edges2 = []\n\n # The code now collects the roles of all client processes and use the information\n # to determine how to split the workloads. 
Here is to simulate the multi-client\n # use case.\n def set_roles(num_clients):\n dgl.distributed.role.CUR_ROLE = 'default'\n dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}\n dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}\n\n for i in range(num_parts):\n set_roles(num_parts)\n part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)\n local_nids = F.nonzero_1d(part_g.ndata['inner_node'])\n local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)\n nodes = node_split(node_mask, gpb, rank=i, force_even=True)\n all_nodes1.append(nodes)\n subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(local_nids))\n print('part {} get {} nodes and {} are in the partition'.format(i, len(nodes), len(subset)))\n\n set_roles(num_parts * 2)\n nodes1 = node_split(node_mask, gpb, rank=i * 2, force_even=True)\n nodes2 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=True)\n nodes3, _ = F.sort_1d(F.cat([nodes1, nodes2], 0))\n all_nodes2.append(nodes3)\n subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(nodes3))\n print('intersection has', len(subset))\n\n set_roles(num_parts)\n local_eids = F.nonzero_1d(part_g.edata['inner_edge'])\n local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)\n edges = edge_split(edge_mask, gpb, rank=i, force_even=True)\n all_edges1.append(edges)\n subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(local_eids))\n print('part {} get {} edges and {} are in the partition'.format(i, len(edges), len(subset)))\n\n set_roles(num_parts * 2)\n edges1 = edge_split(edge_mask, gpb, rank=i * 2, force_even=True)\n edges2 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=True)\n edges3, _ = F.sort_1d(F.cat([edges1, edges2], 0))\n all_edges2.append(edges3)\n subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(edges3))\n print('intersection has', len(subset))\n all_nodes1 = F.cat(all_nodes1, 0)\n all_edges1 = F.cat(all_edges1, 0)\n all_nodes2 = F.cat(all_nodes2, 0)\n all_edges2 = F.cat(all_edges2, 0)\n all_nodes = np.nonzero(node_mask)[0]\n all_edges = np.nonzero(edge_mask)[0]\n assert np.all(all_nodes == F.asnumpy(all_nodes1))\n assert np.all(all_edges == F.asnumpy(all_edges1))\n assert np.all(all_nodes == F.asnumpy(all_nodes2))\n assert np.all(all_edges == F.asnumpy(all_edges2))\n\ndef prepare_dist():\n ip_config = open(\"kv_ip_config.txt\", \"w\")\n ip_addr = get_local_usable_addr()\n ip_config.write('{}\\n'.format(ip_addr))\n ip_config.close()\n\nif __name__ == '__main__':\n os.makedirs('/tmp/dist_graph', exist_ok=True)\n test_dist_emb_server_client()\n test_server_client()\n test_split()\n test_split_even()\n test_standalone()\n test_standalone_node_emb()\n"
]
| [
[
"numpy.random.normal",
"scipy.sparse.random",
"numpy.sort",
"numpy.nonzero"
]
]
|
hmorimitsu/flow-transforms-pytorch | [
"f01ed22b6f7eab7ee5fd00a3b37a16c424eecc94"
]
| [
"tests/test_additive_color.py"
]
| [
"import numpy as np\nimport pytest\nimport torch\n\nfrom tests.base import (\n images_list, flows_list, NUM_IMAGES, IMAGE_HEIGHT, IMAGE_WIDTH)\nfrom flow_transforms import ToTensor, RandomAdditiveColor\n\n\[email protected]\ndef get_tensor_data(images_list, flows_list):\n tt = ToTensor()\n images, flows = tt(images_list, flows_list)\n return images, flows\n\n\ndef test_add(get_tensor_data):\n images, flows = get_tensor_data\n ac = RandomAdditiveColor(stdev=1.0, independent=False)\n images2, flows2 = ac(images.clone(), flows)\n diff = images2[0, 0, 0, 0] - images[0, 0, 0, 0]\n assert torch.allclose(images+diff, images2)\n\n\ndef test_add_independent(get_tensor_data):\n images, flows = get_tensor_data\n ac = RandomAdditiveColor(stdev=1.0, independent=True)\n images2, flows2 = ac(images.clone(), flows)\n diff = images2[0, 0, 0, 0] - images[0, 0, 0, 0]\n assert not torch.allclose(images+diff, images2)\n for i in range(images.shape[0]):\n diff = images2[i, 0, 0, 0] - images[i, 0, 0, 0]\n assert torch.allclose(images[i]+diff, images2[i])\n"
]
| [
[
"torch.allclose"
]
]
|
richardgoater/metaspace | [
"5db6b2fd0170b8d90aabe04b887a2a7c6adefdc4"
]
| [
"metaspace/engine/sm/engine/annotation_spark/formula_imager.py"
]
| [
"import logging\nimport pickle\nfrom pathlib import Path\nfrom typing import List, Dict, Set\n\nimport numpy as np\nimport pandas as pd\nfrom pyspark.files import SparkFiles\nfrom scipy.sparse import coo_matrix\n\nfrom sm.engine.isocalc_wrapper import IsocalcWrapper\nfrom sm.engine.annotation.formula_validator import (\n make_compute_image_metrics,\n formula_image_metrics,\n)\n\nlogger = logging.getLogger('engine')\n\n\n# pylint: disable=too-many-locals\n# this function is compute performance optimized\ndef gen_iso_images(ds_segm_it, centr_df, nrows, ncols, isocalc):\n for ds_segm_df in ds_segm_it:\n ds_segm_mz_min, _ = isocalc.mass_accuracy_bounds(ds_segm_df.mz.values[0])\n _, ds_segm_mz_max = isocalc.mass_accuracy_bounds(ds_segm_df.mz.values[-1])\n\n centr_df_slice = centr_df[(centr_df.mz >= ds_segm_mz_min) & (centr_df.mz <= ds_segm_mz_max)]\n\n centr_mzs = centr_df_slice.mz.values\n centr_f_inds = centr_df_slice.formula_i.values\n centr_p_inds = centr_df_slice.peak_i.values\n centr_ints = centr_df_slice.int.values\n\n lower, upper = isocalc.mass_accuracy_bounds(centr_mzs)\n lower_inds = np.searchsorted(ds_segm_df.mz.values, lower, 'l')\n upper_inds = np.searchsorted(ds_segm_df.mz.values, upper, 'r')\n\n # Note: consider going in the opposite direction so that\n # formula_image_metrics can check for the first peak images instead of the last\n for i, (lo_i, up_i) in enumerate(zip(lower_inds, upper_inds)):\n m = None\n if up_i - lo_i > 0:\n data = ds_segm_df.int.values[lo_i:up_i]\n inds = ds_segm_df.sp_idx.values[lo_i:up_i]\n row_inds = inds / ncols\n col_inds = inds % ncols\n m = coo_matrix((data, (row_inds, col_inds)), shape=(nrows, ncols), copy=True)\n yield centr_f_inds[i], centr_p_inds[i], centr_ints[i], m\n\n\ndef get_ds_dims(coordinates):\n min_x, min_y = np.amin(coordinates, axis=0)\n max_x, max_y = np.amax(coordinates, axis=0)\n nrows, ncols = max_y - min_y + 1, max_x - min_x + 1\n return nrows, ncols\n\n\ndef get_pixel_indices(coordinates):\n _coord = np.array(coordinates)\n _coord = np.around(_coord, 5) # correct for numerical precision\n _coord -= np.amin(_coord, axis=0)\n\n _, ncols = get_ds_dims(coordinates)\n pixel_indices = _coord[:, 1] * ncols + _coord[:, 0]\n pixel_indices = pixel_indices.astype(np.int32)\n return pixel_indices\n\n\ndef make_sample_area_mask(coordinates):\n pixel_indices = get_pixel_indices(coordinates)\n nrows, ncols = get_ds_dims(coordinates)\n sample_area_mask = np.zeros(ncols * nrows, dtype=bool)\n sample_area_mask[pixel_indices] = True\n return sample_area_mask.reshape(nrows, ncols)\n\n\ndef choose_ds_segments(ds_segments, centr_df, ppm):\n centr_segm_min_mz, centr_segm_max_mz = centr_df.mz.agg([np.min, np.max])\n centr_segm_min_mz -= centr_segm_min_mz * ppm * 1e-6\n centr_segm_max_mz += centr_segm_max_mz * ppm * 1e-6\n\n first_ds_segm_i = np.searchsorted(ds_segments[:, 0], centr_segm_min_mz, side='right') - 1\n first_ds_segm_i = max(0, first_ds_segm_i)\n last_ds_segm_i = np.searchsorted(\n ds_segments[:, 1], centr_segm_max_mz, side='left'\n ) # last included\n last_ds_segm_i = min(len(ds_segments) - 1, last_ds_segm_i)\n return first_ds_segm_i, last_ds_segm_i\n\n\ndef read_ds_segment(segm_path):\n sp_chunk_list = []\n try:\n with open(segm_path, 'rb') as f:\n while True:\n sp_chunk_list.append(pickle.load(f))\n except EOFError:\n pass\n\n return pd.concat(sp_chunk_list) if sp_chunk_list else None\n\n\ndef read_centroids_segment(segm_path):\n with open(segm_path, 'rb') as f:\n return pickle.load(f)\n\n\ndef read_ds_segments(first_segm_i, 
last_segm_i):\n for ds_segm_i in range(first_segm_i, last_segm_i + 1):\n segm_path = get_file_path(f'ds_segm_{ds_segm_i:04}.pickle')\n ds_segm_df = read_ds_segment(segm_path)\n if ds_segm_df is not None:\n yield ds_segm_df.sort_values(by='mz')\n\n\ndef get_file_path(name):\n return Path(SparkFiles.get(name))\n\n\ndef create_process_segment(\n ds_segments: List,\n coordinates: np.ndarray,\n ds_config: Dict,\n target_formula_inds: Set[int],\n targeted_database_formula_inds: Set[int],\n):\n sample_area_mask = make_sample_area_mask(coordinates)\n nrows, ncols = get_ds_dims(coordinates)\n compute_metrics = make_compute_image_metrics(\n sample_area_mask, nrows, ncols, ds_config['image_generation']\n )\n isocalc = IsocalcWrapper(ds_config)\n ppm = ds_config['image_generation']['ppm']\n min_px = ds_config['image_generation']['min_px']\n n_peaks = ds_config['isotope_generation']['n_peaks']\n\n def process_centr_segment(segm_i):\n centr_segm_path = get_file_path(f'centr_segm_{segm_i:04}.pickle')\n\n formula_metrics_df, formula_images = pd.DataFrame(), {}\n if centr_segm_path.exists():\n logger.info(f'Reading centroids segment {segm_i} from {centr_segm_path}')\n\n centr_df = read_centroids_segment(centr_segm_path)\n first_ds_segm_i, last_ds_segm_i = choose_ds_segments(ds_segments, centr_df, ppm)\n\n logger.info(f'Reading dataset segments {first_ds_segm_i}-{last_ds_segm_i}')\n\n ds_segm_it = read_ds_segments(first_ds_segm_i, last_ds_segm_i)\n formula_images_it = gen_iso_images(\n ds_segm_it, centr_df=centr_df, nrows=nrows, ncols=ncols, isocalc=isocalc\n )\n formula_metrics_df, formula_images = formula_image_metrics(\n formula_images_it,\n compute_metrics,\n target_formula_inds,\n targeted_database_formula_inds,\n n_peaks,\n min_px,\n )\n logger.info(f'Segment {segm_i} finished')\n else:\n logger.warning(f'Centroids segment path not found {centr_segm_path}')\n\n return formula_metrics_df, formula_images\n\n return process_centr_segment\n"
]
| [
[
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.zeros",
"numpy.searchsorted",
"pandas.concat",
"pandas.DataFrame",
"numpy.amax",
"numpy.amin",
"numpy.around"
]
]
|
matthiasdiener/loopy | [
"a1e46b6205902f19d541d8abf9f5382ecab9c8ad"
]
| [
"test/test_target.py"
]
| [
"from __future__ import division, absolute_import, print_function\n\n__copyright__ = \"Copyright (C) 2012 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport loopy as lp\nimport pyopencl as cl\nimport pyopencl.clmath # noqa\nimport pyopencl.clrandom # noqa\nimport pytest\n\nfrom loopy.target.c import CTarget\nfrom loopy.target.opencl import OpenCLTarget\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import faulthandler\nexcept ImportError:\n pass\nelse:\n faulthandler.enable()\n\nfrom pyopencl.tools import pytest_generate_tests_for_pyopencl \\\n as pytest_generate_tests\n\n__all__ = [\n \"pytest_generate_tests\",\n \"cl\" # 'cl.create_some_context'\n ]\n\n\nfrom loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa\n\n\ndef test_ispc_target(occa_mode=False):\n from loopy.target.ispc import ISPCTarget\n\n knl = lp.make_kernel(\n \"{ [i]: 0<=i<n }\",\n \"out[i] = 2*a[i]\",\n [\n lp.GlobalArg(\"out,a\", np.float32, shape=lp.auto),\n \"...\"\n ],\n target=ISPCTarget(occa_mode=occa_mode))\n\n knl = lp.split_iname(knl, \"i\", 8, inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"i_outer\", 4, outer_tag=\"g.0\", inner_tag=\"ilp\")\n knl = lp.add_prefetch(knl, \"a\", [\"i_inner\", \"i_outer_inner\"],\n default_tag=\"l.auto\")\n\n codegen_result = lp.generate_code_v2(\n lp.get_one_scheduled_kernel(\n lp.preprocess_kernel(knl)))\n\n print(codegen_result.device_code())\n print(codegen_result.host_code())\n\n\ndef test_cuda_target():\n from loopy.target.cuda import CudaTarget\n\n knl = lp.make_kernel(\n \"{ [i]: 0<=i<n }\",\n \"out[i] = 2*a[i]\",\n [\n lp.GlobalArg(\"out,a\", np.float32, shape=lp.auto),\n \"...\"\n ],\n target=CudaTarget())\n\n knl = lp.split_iname(knl, \"i\", 8, inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"i_outer\", 4, outer_tag=\"g.0\", inner_tag=\"ilp\")\n knl = lp.add_prefetch(knl, \"a\", [\"i_inner\", \"i_outer_inner\"],\n default_tag=\"l.auto\")\n\n print(\n lp.generate_code(\n lp.get_one_scheduled_kernel(\n lp.preprocess_kernel(knl)))[0])\n\n\ndef test_generate_c_snippet():\n from pymbolic import var\n I = var(\"I\") # noqa\n f = var(\"f\")\n df = var(\"df\")\n q_v = var(\"q_v\")\n eN = var(\"eN\") # noqa\n k = var(\"k\")\n u = var(\"u\")\n\n from functools import partial\n l_sum = partial(lp.Reduction, \"sum\", allow_simultaneous=True)\n\n Instr = lp.Assignment # noqa\n\n knl = lp.make_kernel(\n \"{[I, k]: 0<=I<nSpace and 0<=k<nQuad}\",\n [\n Instr(f[I], l_sum(k, q_v[k, I]*u)),\n 
Instr(df[I], l_sum(k, q_v[k, I])),\n ],\n [\n lp.GlobalArg(\"q_v\", np.float64, shape=\"nQuad, nSpace\"),\n lp.GlobalArg(\"f,df\", np.float64, shape=\"nSpace\"),\n lp.ValueArg(\"u\", np.float64),\n \"...\",\n ],\n target=CTarget(),\n assumptions=\"nQuad>=1\")\n\n if 0: # enable to play with prefetching\n # (prefetch currently requires constant sizes)\n knl = lp.fix_parameters(knl, nQuad=5, nSpace=3)\n knl = lp.add_prefetch(knl, \"q_v\", \"k,I\", default_tag=None)\n\n knl = lp.split_iname(knl, \"k\", 4, inner_tag=\"unr\", slabs=(0, 1))\n knl = lp.prioritize_loops(knl, \"I,k_outer,k_inner\")\n\n knl = lp.preprocess_kernel(knl)\n knl = lp.get_one_scheduled_kernel(knl)\n print(lp.generate_body(knl))\n\n\[email protected](\"target\", [CTarget, OpenCLTarget])\[email protected](\"tp\", [\"f32\", \"f64\"])\ndef test_math_function(target, tp):\n # Test correct maths functions are generated for C and OpenCL\n # backend instead for different data type\n\n data_type = {\"f32\": np.float32,\n \"f64\": np.float64}[tp]\n\n import pymbolic.primitives as p\n\n i = p.Variable(\"i\")\n xi = p.Subscript(p.Variable(\"x\"), i)\n yi = p.Subscript(p.Variable(\"y\"), i)\n zi = p.Subscript(p.Variable(\"z\"), i)\n\n n = 100\n domain = \"{[i]: 0<=i<%d}\" % n\n data = [lp.GlobalArg(\"x\", data_type, shape=(n,)),\n lp.GlobalArg(\"y\", data_type, shape=(n,)),\n lp.GlobalArg(\"z\", data_type, shape=(n,))]\n\n inst = [lp.Assignment(xi, p.Variable(\"min\")(yi, zi))]\n knl = lp.make_kernel(domain, inst, data, target=target())\n code = lp.generate_code_v2(knl).device_code()\n\n assert \"fmin\" in code\n\n if tp == \"f32\" and target == CTarget:\n assert \"fminf\" in code\n else:\n assert \"fminf\" not in code\n\n inst = [lp.Assignment(xi, p.Variable(\"max\")(yi, zi))]\n knl = lp.make_kernel(domain, inst, data, target=target())\n code = lp.generate_code_v2(knl).device_code()\n\n assert \"fmax\" in code\n\n if tp == \"f32\" and target == CTarget:\n assert \"fmaxf\" in code\n else:\n assert \"fmaxf\" not in code\n\n\[email protected](\"tp\", [\"f32\", \"f64\"])\ndef test_random123(ctx_factory, tp):\n ctx = ctx_factory()\n queue = cl.CommandQueue(ctx)\n\n import pyopencl.version # noqa\n if cl.version.VERSION < (2016, 2):\n pytest.skip(\"Random123 RNG not supported in PyOpenCL < 2016.2\")\n\n n = 150000\n\n knl = lp.make_kernel(\n \"{ [i]: 0<=i<n }\",\n \"\"\"\n <> key2 = make_uint2(i, 324830944) {inames=i}\n <> key4 = make_uint4(i, 324830944, 234181, 2233) {inames=i}\n <> ctr = make_uint4(0, 1, 2, 3) {inames=i,id=init_ctr}\n <> real, ctr = philox4x32_TYPE(ctr, key2) {id=realpart,dep=init_ctr}\n <> imag, ctr = threefry4x32_TYPE(ctr, key4) {dep=init_ctr:realpart}\n\n out[i, 0] = real.s0 + 1j * imag.s0\n out[i, 1] = real.s1 + 1j * imag.s1\n out[i, 2] = real.s2 + 1j * imag.s2\n out[i, 3] = real.s3 + 1j * imag.s3\n \"\"\".replace(\"TYPE\", tp))\n\n knl = lp.split_iname(knl, \"i\", 128, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.set_options(knl, write_cl=True)\n\n evt, (out,) = knl(queue, n=n)\n\n out = out.get()\n assert (out < 1).all()\n assert (0 <= out).all()\n\n\ndef test_tuple(ctx_factory):\n ctx = ctx_factory()\n queue = cl.CommandQueue(ctx)\n\n import islpy as isl\n knl = lp.make_kernel(\n [isl.BasicSet(\"[] -> {[]: }\")],\n \"\"\"\n a, b = make_tuple(1, 2.)\n \"\"\")\n\n evt, (a, b) = knl(queue)\n\n assert a.get() == 1\n assert b.get() == 2.\n\n\ndef test_clamp(ctx_factory):\n ctx = ctx_factory()\n queue = cl.CommandQueue(ctx)\n\n n = 15 * 10**6\n x = cl.clrandom.rand(queue, n, dtype=np.float32)\n\n knl = 
lp.make_kernel(\n \"{ [i]: 0<=i<n }\",\n \"out[i] = clamp(x[i], a, b)\")\n\n knl = lp.split_iname(knl, \"i\", 128, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.set_options(knl, write_cl=True)\n\n evt, (out,) = knl(queue, x=x, a=np.float32(12), b=np.float32(15))\n\n\ndef test_numba_target():\n knl = lp.make_kernel(\n \"{[i,j,k]: 0<=i,j<M and 0<=k<N}\",\n \"D[i,j] = sqrt(sum(k, (X[i, k]-X[j, k])**2))\",\n target=lp.NumbaTarget())\n\n knl = lp.add_and_infer_dtypes(knl, {\"X\": np.float32})\n\n print(lp.generate_code_v2(knl).device_code())\n\n\ndef test_numba_cuda_target():\n knl = lp.make_kernel(\n \"{[i,j,k]: 0<=i,j<M and 0<=k<N}\",\n \"D[i,j] = sqrt(sum(k, (X[i, k]-X[j, k])**2))\",\n target=lp.NumbaCudaTarget())\n\n knl = lp.assume(knl, \"M>0\")\n knl = lp.split_iname(knl, \"i\", 16, outer_tag='g.0')\n knl = lp.split_iname(knl, \"j\", 128, inner_tag='l.0', slabs=(0, 1))\n knl = lp.add_prefetch(knl, \"X[i,:]\", default_tag=\"l.auto\")\n knl = lp.fix_parameters(knl, N=3)\n knl = lp.prioritize_loops(knl, \"i_inner,j_outer\")\n knl = lp.tag_inames(knl, \"k:unr\")\n knl = lp.tag_array_axes(knl, \"X\", \"N0,N1\")\n\n knl = lp.add_and_infer_dtypes(knl, {\"X\": np.float32})\n\n print(lp.generate_code_v2(knl).all_code())\n\n\ndef test_sized_integer_c_codegen(ctx_factory):\n ctx = ctx_factory()\n queue = cl.CommandQueue(ctx)\n\n from pymbolic import var\n knl = lp.make_kernel(\n \"{[i]: 0<=i<n}\",\n [lp.Assignment(\"a[i]\", lp.TypeCast(np.int64, 1) << var(\"i\"))]\n )\n\n knl = lp.set_options(knl, write_code=True)\n n = 40\n\n evt, (a,) = knl(queue, n=n)\n\n a_ref = 1 << np.arange(n, dtype=np.int64)\n\n assert np.array_equal(a_ref, a.get())\n\n\ndef test_child_invalid_type_cast():\n from pymbolic import var\n knl = lp.make_kernel(\n \"{[i]: 0<=i<n}\",\n [\"<> ctr = make_uint2(0, 0)\",\n lp.Assignment(\"a[i]\", lp.TypeCast(np.int64, var(\"ctr\")) << var(\"i\"))]\n )\n\n with pytest.raises(lp.LoopyError):\n knl = lp.preprocess_kernel(knl)\n\n\ndef test_target_invalid_type_cast():\n dtype = np.dtype([('', '<u4'), ('', '<i4')])\n with pytest.raises(lp.LoopyError):\n lp.TypeCast(dtype, 1)\n\n\ndef test_ispc_streaming_stores():\n stream_dtype = np.float32\n index_dtype = np.int32\n\n knl = lp.make_kernel(\n \"{[i]: 0<=i<n}\",\n \"a[i] = b[i] + scalar * c[i]\",\n target=lp.ISPCTarget(), index_dtype=index_dtype,\n name=\"stream_triad\")\n\n vars = [\"a\", \"b\", \"c\", \"scalar\"]\n knl = lp.assume(knl, \"n>0\")\n knl = lp.split_iname(\n knl, \"i\", 2**18, outer_tag=\"g.0\", slabs=(0, 1))\n knl = lp.split_iname(knl, \"i_inner\", 8, inner_tag=\"l.0\")\n knl = lp.tag_instructions(knl, \"!streaming_store\")\n\n knl = lp.add_and_infer_dtypes(knl, {\n var: stream_dtype\n for var in vars\n })\n\n knl = lp.set_argument_order(knl, vars + [\"n\"])\n\n knl = lp.preprocess_kernel(knl)\n knl = lp.get_one_scheduled_kernel(knl)\n lp.generate_code_v2(knl).all_code()\n\n\ndef test_cuda_short_vector():\n knl = lp.make_kernel(\n \"{ [i]: 0<=i<n }\",\n \"out[i] = 2*a[i]\",\n target=lp.CudaTarget())\n\n knl = lp.set_options(knl, write_code=True)\n knl = lp.split_iname(knl, \"i\", 4, slabs=(0, 1), inner_tag=\"vec\")\n knl = lp.split_array_axis(knl, \"a,out\", axis_nr=0, count=4)\n knl = lp.tag_array_axes(knl, \"a,out\", \"C,vec\")\n\n knl = lp.set_options(knl, write_wrapper=True)\n knl = lp.add_and_infer_dtypes(knl, {\"a\": np.float32})\n\n print(lp.generate_code_v2(knl).device_code())\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n\n# vim: 
foldmethod=marker\n"
]
| [
[
"numpy.float32",
"numpy.arange",
"numpy.dtype"
]
]
|
jesseddeng/Machine-learning-Final-Project | [
"87284d51e1a968035feae33a1f9f44754a144a00"
]
| [
"app/__init__.py"
]
| [
"from flask import Flask\nfrom sklearn.externals import joblib\n\n\napp = Flask(__name__)\napp.config.from_object(\"app.config\")\n\n\nestimator = joblib.load('predicted.pkl')\n\n\n\n\n\n\nfrom .views import *\n\n\n# Handle Bad Requests\[email protected](404)\ndef page_not_found(e):\n \"\"\"Page Not Found\"\"\"\n return render_template('404.html'), 404\n"
]
| [
[
"sklearn.externals.joblib.load"
]
]
|
basemprince/court_detection | [
"e5fb9549234b0f3c8fd350ea9abc1c7daa1c2306"
]
| [
"train.py"
]
| [
"#!/usr/bin/env python3\r\n\"\"\"Train L-CNN\r\nUsage:\r\n train.py [options] <yaml-config>\r\n train.py (-h | --help )\r\n\r\nArguments:\r\n <yaml-config> Path to the yaml hyper-parameter file\r\n\r\nOptions:\r\n -h --help Show this screen.\r\n -d --devices <devices> Comma seperated GPU devices [default: 0]\r\n -i --identifier <identifier> Folder identifier [default: default-identifier]\r\n\"\"\"\r\n\r\nimport datetime\r\nimport glob\r\nimport os\r\nimport os.path as osp\r\nimport platform\r\nimport pprint\r\nimport random\r\nimport shlex\r\nimport shutil\r\nimport signal\r\nimport subprocess\r\nimport sys\r\nimport threading\r\n\r\nimport numpy as np\r\nimport torch\r\nimport yaml\r\nfrom docopt import docopt\r\nimport scipy.io as sio\r\nimport lcnn\r\nfrom lcnn.config import C, M\r\nfrom lcnn.datasets import WireframeDataset, collate\r\nfrom lcnn.models.line_vectorizer import LineVectorizer\r\nfrom lcnn.models.multitask_learner import MultitaskHead, MultitaskLearner\r\nfrom lcnn.models.HT import hough_transform\r\n\r\n\r\ndef get_outdir(identifier):\r\n # load config\r\n name = str(datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\"))\r\n name += \"-%s\" % identifier\r\n outdir = osp.join(osp.expanduser(C.io.logdir), name)\r\n if not osp.exists(outdir):\r\n os.makedirs(outdir)\r\n C.io.resume_from = outdir\r\n C.to_yaml(osp.join(outdir, \"config.yaml\"))\r\n return outdir\r\n\r\n\r\ndef main():\r\n args = docopt(__doc__)\r\n config_file = args[\"<yaml-config>\"] or \"config/wireframe.yaml\"\r\n C.update(C.from_yaml(filename=config_file))\r\n M.update(C.model)\r\n pprint.pprint(C, indent=4)\r\n resume_from = C.io.resume_from\r\n\r\n # WARNING: L-CNN is still not deterministic\r\n random.seed(0)\r\n np.random.seed(0)\r\n torch.manual_seed(0)\r\n\r\n device_name = \"cpu\"\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args[\"--devices\"]\r\n if torch.cuda.is_available():\r\n device_name = \"cuda\"\r\n torch.backends.cudnn.deterministic = True\r\n torch.cuda.manual_seed(0)\r\n print(\"Let's use\", torch.cuda.device_count(), \"GPU(s)!\")\r\n else:\r\n print(\"CUDA is not available\")\r\n device = torch.device(device_name)\r\n\r\n # 1. dataset\r\n\r\n # uncomment for debug DataLoader\r\n # wireframe.datasets.WireframeDataset(datadir, split=\"train\")[0]\r\n # sys.exit(0)\r\n\r\n datadir = C.io.datadir\r\n kwargs = {\r\n \"collate_fn\": collate,\r\n \"num_workers\": C.io.num_workers if os.name != \"nt\" else 0,\r\n \"pin_memory\": True,\r\n }\r\n train_loader = torch.utils.data.DataLoader(\r\n WireframeDataset(datadir, split=\"train\"),\r\n shuffle=True,\r\n batch_size=M.batch_size,\r\n **kwargs,\r\n )\r\n val_loader = torch.utils.data.DataLoader(\r\n WireframeDataset(datadir, split=\"valid\"),\r\n shuffle=False,\r\n batch_size=M.batch_size_eval,\r\n **kwargs,\r\n )\r\n epoch_size = len(train_loader)\r\n # print(\"epoch_size (train):\", epoch_size)\r\n # print(\"epoch_size (valid):\", len(val_loader))\r\n\r\n if resume_from:\r\n checkpoint = torch.load(osp.join(resume_from, \"checkpoint_latest.pth\"))\r\n\r\n # 2. model\r\n ### load vote_index matrix for Hough transform\r\n ### defualt settings: (128, 128, 3, 1)\r\n if os.path.isfile(C.io.vote_index):\r\n print('load vote_index ... ')\r\n vote_index = sio.loadmat(C.io.vote_index)['vote_index']\r\n else:\r\n print('compute vote_index ... 
')\r\n vote_index = hough_transform(rows=128, cols=128, theta_res=3, rho_res=1)\r\n sio.savemat(C.io.vote_index, {'vote_index': vote_index})\r\n vote_index = torch.from_numpy(vote_index).float().contiguous().to(device)\r\n print('vote_index loaded', vote_index.shape)\r\n\r\n if M.backbone == \"stacked_hourglass\":\r\n model = lcnn.models.hg(\r\n depth=M.depth,\r\n head=MultitaskHead,\r\n num_stacks=M.num_stacks,\r\n num_blocks=M.num_blocks,\r\n num_classes=sum(sum(M.head_size, [])),\r\n vote_index=vote_index,\r\n )\r\n else:\r\n raise NotImplementedError\r\n\r\n model = MultitaskLearner(model)\r\n model = LineVectorizer(model)\r\n print(\"model:\", model)\r\n train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\r\n print('num of total parameters', train_params)\r\n\r\n if resume_from:\r\n model.load_state_dict(checkpoint[\"model_state_dict\"])\r\n model = model.to(device)\r\n\r\n # 3. optimizer\r\n if C.optim.name == \"Adam\":\r\n optim = torch.optim.Adam(\r\n model.parameters(),\r\n lr=C.optim.lr,\r\n weight_decay=C.optim.weight_decay,\r\n amsgrad=C.optim.amsgrad,\r\n )\r\n elif C.optim.name == \"SGD\":\r\n optim = torch.optim.SGD(\r\n model.parameters(),\r\n lr=C.optim.lr,\r\n weight_decay=C.optim.weight_decay,\r\n momentum=C.optim.momentum,\r\n )\r\n else:\r\n raise NotImplementedError\r\n\r\n if resume_from:\r\n optim.load_state_dict(checkpoint[\"optim_state_dict\"])\r\n outdir = resume_from or get_outdir(args[\"--identifier\"])\r\n print(\"outdir:\", outdir)\r\n\r\n try:\r\n trainer = lcnn.trainer.Trainer(\r\n device=device,\r\n model=model,\r\n optimizer=optim,\r\n train_loader=train_loader,\r\n val_loader=val_loader,\r\n out=outdir,\r\n )\r\n if resume_from:\r\n trainer.iteration = checkpoint[\"iteration\"]\r\n if trainer.iteration % epoch_size != 0:\r\n print(\"WARNING: iteration is not a multiple of epoch_size, reset it\")\r\n trainer.iteration -= trainer.iteration % epoch_size\r\n trainer.best_mean_loss = checkpoint[\"best_mean_loss\"]\r\n del checkpoint\r\n trainer.train()\r\n except BaseException:\r\n if len(glob.glob(f\"{outdir}/viz/*\")) <= 1:\r\n shutil.rmtree(outdir)\r\n raise\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
]
| [
[
"torch.device",
"torch.cuda.manual_seed",
"numpy.random.seed",
"scipy.io.loadmat",
"scipy.io.savemat",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.from_numpy",
"torch.cuda.is_available"
]
]
|
qyu6/TAILab | [
"6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7"
]
| [
"ML_LinearRegression.py"
]
| [
"'''\n@func:linear regression\n@create:2021.10.24\n'''\n\nfrom textwrap import wrap\n\n\ndef linearregx():\n import streamlit as st\n # 线性回归器。为了实现线性输出与实际输出的残差平方和最小(sum of squares of differences),普通最小二乘法 - (OLS:Oridinary Least Squares)\n import sys\n import numpy as np\n\n X = [1,2,3,4,6,7,8,9,11,13,14,15,15,16,17,18,19,20]\n y = [12,23,34,45,56,67,78,80,100,30,114,124,131,142,151,167,50,40]\n\n col1,col2 = st.columns(2)\n col1.write('X value')\n col1.write(np.array(X))\n col2.write('y value')\n col2.write(np.array(y))\n\n\n num_training = int(0.8*len(X))\n num_test = len(X) - num_training\n\n X_train = np.array(X[:num_training]).reshape((num_training,1))\n y_train = np.array(y[:num_training])\n\n X_test = np.array(X[num_training:]).reshape((num_test,1))\n y_test = np.array(y[num_training:])\n\n\n col1,col2,col3,col4 = st.columns(4)\n col1.write('X_train')\n col1.write(X_train)\n col2.write('y_train')\n col2.write(y_train)\n col3.write('X_test')\n col3.write(X_test)\n col4.write('y_test')\n col4.write(y_test)\n\n\n from sklearn import linear_model\n linear_regressor = linear_model.LinearRegression()\n linear_regressor.fit(X_train,y_train)\n\n import matplotlib.pyplot as plt\n y_train_pred = linear_regressor.predict(X_train)\n fig1,ax1 = plt.subplots()\n ax1.grid(alpha=0.4)\n ax1.scatter(X_train,y_train,color='green')\n ax1.plot(X_train,y_train_pred,color='black',linewidth = 1)\n plt.title('Training data')\n st.pyplot(fig1)\n\n y_test_pred = linear_regressor.predict(X_test)\n fig2,ax2 = plt.subplots()\n ax2.grid(alpha=0.4)\n ax2.scatter(X_test,y_test,color='red')\n ax2.plot(X_test,y_test_pred,color='black',linewidth = 1)\n plt.title('Test data')\n st.pyplot(fig2)\n\n # 评价回归拟合器误差,几个重要的指标(metrics)\n # 平均绝对误差-MAE:mean absolute error\n # 均方误差-MES:mean squared error,误差平方均值\n # 中位数绝对误差-median absolute error,用中位数可以消除异常值outlier的干扰\n # 解释方差分-EVS:explained variance score,对数据集波动的解释能力\n # R^2得分-拟合优度\n # [通常做法]:保证均方误差最低,解释方差分最高\n import sklearn.metrics as sm\n st.write(\"Mean absolute error=\",round(sm.mean_absolute_error(y_test,y_test_pred),2))\n st.write(\"Mean squared error=\",round(sm.mean_squared_error(y_test,y_test_pred),2))\n st.write(\"Median absolute error=\",round(sm.median_absolute_error(y_test,y_test_pred),2))\n st.write(\"Explained variance score=\",round(sm.explained_variance_score(y_test,y_test_pred),2))\n st.write(\"R^2 score=\",round(sm.r2_score(y_test,y_test_pred),2))\n\n\n\n # # 以下内容测试没有通过。一种保存模型数据的方法\n # # 保存模型数据:\n # import cPickle as pickle\n # output_model_file = 'saved_model.pkl'\n # with open(output_model_file,'w') as f:\n # pickle.dump(linear_regressor,f)\n\n # # 加载并使用数据:\n # with open(output_model_file,'r') as f:\n # model_linregr = pickle.load(f)\n\n # y_test_pred_new = model_linregr.predict(X_test)\n # print('\\nNew mean absolute error=',round(sm.mean_absolute_error(y_test,y_test_pred_new),2))\n\n\n\n\n# test\n# linearregx()"
]
| [
[
"numpy.array",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"sklearn.metrics.explained_variance_score",
"sklearn.metrics.median_absolute_error",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.r2_score"
]
]
|
juriwiens/mlflow-example | [
"ccf517d70ec96a3661d586e55bde4915e3329087"
]
| [
"train.py"
]
| [
"\"\"\"\nKeras model training example, copied from:\nhttps://keras.io/examples/structured_data/structured_data_classification_from_scratch/\nhttps://github.com/keras-team/keras-io/blob/master/examples/structured_data/structured_data_classification_from_scratch.py\n\nOriginal code is under Apache-2.0 License (see LICENSE).\n\"\"\"\n\nimport tensorflow as tf\nimport pandas as pd\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import IntegerLookup\nfrom tensorflow.keras.layers import Normalization\nfrom tensorflow.keras.layers import StringLookup\nimport mlflow\nimport mlflow.tensorflow\nimport typer\n\n\ndef main(batch_size: int = 32, epochs: int = 50, units: int = 32, dropout: float = 0.5):\n print(\"MLflow tracking URI:\", mlflow.get_tracking_uri())\n mlflow.tensorflow.autolog()\n mlflow.log_param(\"units\", units)\n mlflow.log_param(\"dropout\", dropout)\n\n train_ds, val_ds = prepare_data(batch_size)\n model = build_model(units, dropout, train_ds)\n model.fit(train_ds, epochs=epochs, validation_data=val_ds)\n\n\ndef prepare_data(batch_size: int):\n file_url = \"http://storage.googleapis.com/download.tensorflow.org/data/heart.csv\"\n dataframe = pd.read_csv(file_url)\n print(dataframe.head())\n\n val_dataframe = dataframe.sample(frac=0.2, random_state=1337)\n train_dataframe = dataframe.drop(val_dataframe.index)\n print(\n \"Using %d samples for training and %d for validation\"\n % (len(train_dataframe), len(val_dataframe))\n )\n\n train_ds = dataframe_to_dataset(train_dataframe).batch(batch_size)\n val_ds = dataframe_to_dataset(val_dataframe).batch(batch_size)\n\n return train_ds, val_ds\n\n\ndef build_model(units: int, dropout: float, train_ds):\n # Categorical features encoded as integers\n sex = keras.Input(shape=(1,), name=\"sex\", dtype=\"int64\")\n cp = keras.Input(shape=(1,), name=\"cp\", dtype=\"int64\")\n fbs = keras.Input(shape=(1,), name=\"fbs\", dtype=\"int64\")\n restecg = keras.Input(shape=(1,), name=\"restecg\", dtype=\"int64\")\n exang = keras.Input(shape=(1,), name=\"exang\", dtype=\"int64\")\n ca = keras.Input(shape=(1,), name=\"ca\", dtype=\"int64\")\n\n # Categorical feature encoded as string\n thal = keras.Input(shape=(1,), name=\"thal\", dtype=\"string\")\n\n # Numerical features\n age = keras.Input(shape=(1,), name=\"age\")\n trestbps = keras.Input(shape=(1,), name=\"trestbps\")\n chol = keras.Input(shape=(1,), name=\"chol\")\n thalach = keras.Input(shape=(1,), name=\"thalach\")\n oldpeak = keras.Input(shape=(1,), name=\"oldpeak\")\n slope = keras.Input(shape=(1,), name=\"slope\")\n\n all_inputs = [\n sex,\n cp,\n fbs,\n restecg,\n exang,\n ca,\n thal,\n age,\n trestbps,\n chol,\n thalach,\n oldpeak,\n slope,\n ]\n\n # Integer categorical features\n sex_encoded = encode_categorical_feature(sex, \"sex\", train_ds, False)\n cp_encoded = encode_categorical_feature(cp, \"cp\", train_ds, False)\n fbs_encoded = encode_categorical_feature(fbs, \"fbs\", train_ds, False)\n restecg_encoded = encode_categorical_feature(restecg, \"restecg\", train_ds, False)\n exang_encoded = encode_categorical_feature(exang, \"exang\", train_ds, False)\n ca_encoded = encode_categorical_feature(ca, \"ca\", train_ds, False)\n\n # String categorical features\n thal_encoded = encode_categorical_feature(thal, \"thal\", train_ds, True)\n\n # Numerical features\n age_encoded = encode_numerical_feature(age, \"age\", train_ds)\n trestbps_encoded = encode_numerical_feature(trestbps, \"trestbps\", train_ds)\n chol_encoded = encode_numerical_feature(chol, 
\"chol\", train_ds)\n thalach_encoded = encode_numerical_feature(thalach, \"thalach\", train_ds)\n oldpeak_encoded = encode_numerical_feature(oldpeak, \"oldpeak\", train_ds)\n slope_encoded = encode_numerical_feature(slope, \"slope\", train_ds)\n\n all_features = layers.concatenate(\n [\n sex_encoded,\n cp_encoded,\n fbs_encoded,\n restecg_encoded,\n exang_encoded,\n slope_encoded,\n ca_encoded,\n thal_encoded,\n age_encoded,\n trestbps_encoded,\n chol_encoded,\n thalach_encoded,\n oldpeak_encoded,\n ]\n )\n x = layers.Dense(units, activation=\"relu\")(all_features)\n x = layers.Dropout(dropout)(x)\n output = layers.Dense(1, activation=\"sigmoid\")(x)\n model = keras.Model(all_inputs, output)\n model.compile(\"adam\", \"binary_crossentropy\", metrics=[\"accuracy\"])\n\n return model\n\n\ndef dataframe_to_dataset(dataframe):\n dataframe = dataframe.copy()\n labels = dataframe.pop(\"target\")\n ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))\n ds = ds.shuffle(buffer_size=len(dataframe))\n return ds\n\n\ndef encode_numerical_feature(feature, name, dataset):\n # Create a Normalization layer for our feature\n normalizer = Normalization()\n\n # Prepare a Dataset that only yields our feature\n feature_ds = dataset.map(lambda x, y: x[name])\n feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1))\n\n # Learn the statistics of the data\n normalizer.adapt(feature_ds)\n\n # Normalize the input feature\n encoded_feature = normalizer(feature)\n return encoded_feature\n\n\ndef encode_categorical_feature(feature, name, dataset, is_string):\n lookup_class = StringLookup if is_string else IntegerLookup\n # Create a lookup layer which will turn strings into integer indices\n lookup = lookup_class(output_mode=\"binary\")\n\n # Prepare a Dataset that only yields our feature\n feature_ds = dataset.map(lambda x, y: x[name])\n feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1))\n\n # Learn the set of possible string values and assign them a fixed integer index\n lookup.adapt(feature_ds)\n\n # Turn the string input into integer indices\n encoded_feature = lookup(feature)\n return encoded_feature\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n"
]
| [
[
"tensorflow.keras.layers.Normalization",
"tensorflow.keras.Input",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"pandas.read_csv",
"tensorflow.keras.layers.concatenate"
]
]
|
Ryogo-san/ivr | [
"0b312df6ec6bde77cae5ce4e957e5f1cd880463c"
]
| [
"src/attack/fgsm.py"
]
| [
"import torch\nimport torch.nn as nn\n\nimport attack\nfrom attack import Attack\n\n\nclass FGSM(Attack):\n def __init__(self, model, device, cfg, eps=0.007):\n super().__init__(model)\n self.cfg = cfg\n self.eps = self.cfg.pert_eps\n self.device = device\n\n def forward(self, images, labels):\n images = images\n labels = labels\n loss_func = nn.CrossEntropyLoss()\n\n images.requires_grad = True\n methods, letters = self.model(images)\n loss1 = loss_func(methods, labels[:, 0])\n loss2 = loss_func(letters, labels[:, 1])\n loss = self.cfg.alpha * loss1 + (1 - self.cfg.alpha) * loss2\n\n grad = torch.autograd.grad(\n loss, images, retain_graph=False, create_graph=False\n )[0]\n adv_images = images + self.eps * grad.sign()\n adv_images = torch.clamp(adv_images, min=0, max=1).detach()\n\n return adv_images\n"
]
| [
[
"torch.autograd.grad",
"torch.nn.CrossEntropyLoss",
"torch.clamp"
]
]
|
c-minz/Python-causets | [
"c19fad74abc9d5ac8a060b3bc9455ce4ae30f735"
]
| [
"sprinkledcauset.py"
]
| [
"#!/usr/bin/env python\r\n'''\r\nCreated on 22 Jul 2020\r\n\r\n@author: Christoph Minz\r\n@license: BSD 3-Clause\r\n'''\r\nfrom __future__ import annotations\r\nfrom typing import Set, List, Iterable, Union\r\nimport numpy as np\r\nimport math\r\nfrom numpy.random import default_rng\r\nfrom causets.causetevent import CausetEvent\r\nfrom causets.embeddedcauset import EmbeddedCauset\r\nfrom causets.shapes import CoordinateShape\r\nfrom causets.spacetimes import Spacetime\r\n\r\n\r\nclass SprinkledCauset(EmbeddedCauset):\r\n '''\r\n Handles a causal set that is embedded in a subset of a manifold.\r\n '''\r\n\r\n _intensity: float\r\n\r\n def __init__(self,\r\n card: int = 0, intensity: float = 0.0,\r\n dim: int = -1,\r\n spacetime: Spacetime = None,\r\n shape: Union[str, CoordinateShape] = None) -> None:\r\n '''\r\n Generates a sprinkled causal set by sprinkling in a \r\n spacetime subset. \r\n The arguments `dim`, `shape` and `spacetime` are handled by the \r\n super class `EmbeddedCauset` before events are sprinkled.\r\n\r\n 'card': int\r\n Number of sprinkled events.\r\n\r\n 'intensity': float\r\n Sprinkling intensity parameter, the expected number of \r\n sprinkled events.\r\n '''\r\n # initialise base class (EmbeddedCauset):\r\n super().__init__(spacetime=spacetime, shape=shape, dim=dim)\r\n # sprinkle:\r\n self._intensity = 0.0\r\n if card > 0:\r\n self.sprinkle(card)\r\n else:\r\n self.intensify(intensity)\r\n\r\n @property\r\n def Intensity(self) -> float:\r\n '''\r\n Returns the sprinkling intensity, which is the expected \r\n number of sprinkled events. The exact number of sprinkled \r\n events is given by the property 'Card'.\r\n '''\r\n return self._intensity\r\n\r\n @property\r\n def Density(self) -> float: # overwrites superclass\r\n return self._intensity / self.Shape.Volume\r\n\r\n @property\r\n def LengthScale(self) -> float: # overwrites superclass\r\n return (self.Shape.Volume / self._intensity)**(1.0 / self.Dim)\r\n\r\n def _sprinkle_coords(self, count: int, shape: CoordinateShape,\r\n rng) -> np.ndarray:\r\n if count < 0:\r\n raise ValueError('The sprinkle cardinality has to ' +\r\n 'be a non-negative integer.')\r\n coords: np.ndarray = np.empty((count, self.Dim),\r\n dtype=np.float32)\r\n if shape.Name in ('cube', 'cuboid'):\r\n # Create rectangle based sprinkle:\r\n low: np.ndarray\r\n high: np.ndarray\r\n if shape.Name == 'cuboid':\r\n low = shape.Center - \\\r\n shape.Parameter('edges') / 2\r\n high = shape.Center + \\\r\n shape.Parameter('edges') / 2\r\n else:\r\n low = shape.Center - \\\r\n shape.Parameter('edge') / 2\r\n high = shape.Center + \\\r\n shape.Parameter('edge') / 2\r\n for i in range(count):\r\n coords[i, :] = rng.uniform(low, high)\r\n elif shape.Name in ('ball', 'cylinder', 'diamond'):\r\n # Create circle based sprinkle:\r\n isCylindrical: bool = 'cylinder' in shape.Name\r\n isDiamond: bool = 'diamond' in shape.Name\r\n d: int = self.Dim\r\n b_r: float = shape.Parameter('radius')\r\n if (d == 2) and isDiamond:\r\n # pick `count` random coordinate tuples uniformly:\r\n uv: np.ndarray = rng.uniform(low=-1.0, high=1.0,\r\n size=(count, 2))\r\n coords[:, 0] = uv[:, 0] + uv[:, 1]\r\n coords[:, 1] = uv[:, 0] - uv[:, 1]\r\n coords *= b_r / 2\r\n else:\r\n b_dstart: int = 0 if shape.Name == 'ball' else 1\r\n b_d: int = d - b_dstart\r\n if isCylindrical:\r\n # set time coordinate:\r\n time_low: float = shape.Center[0] - \\\r\n shape.Parameter('duration') / 2\r\n time_high: float = shape.Center[0] + \\\r\n shape.Parameter('duration') / 2\r\n coords[:, 0] = 
rng.uniform(time_low, time_high,\r\n size=(count,))\r\n # pick `count` random coordinate tuples uniformly:\r\n r_low: float = shape.Parameter('hollow')**b_d\r\n for i in range(count):\r\n # get coordinates on sphere using normal distribution:\r\n coord: np.ndarray = rng.standard_normal(size=(b_d,))\r\n r: float = np.sqrt(sum(np.square(coord)))\r\n r_scaling: float\r\n r_scaling = rng.uniform(low=r_low)**(1.0 / b_d)\r\n if isDiamond:\r\n # set time coordinate:\r\n h_squeeze: float = rng.uniform()**(1.0 / d)\r\n h_sign: float = np.sign(\r\n rng.uniform(low=-1.0, high=1.0))\r\n coords[i, 0] = h_sign * (1 - h_squeeze) * b_r\r\n # adjust scaling:\r\n r_scaling *= h_squeeze\r\n coords[i, b_dstart:] = shape.Center[b_dstart:] + \\\r\n (r_scaling * b_r / r) * coord\r\n return coords\r\n\r\n def sprinkle(self, count: int, rng=default_rng(),\r\n shape: CoordinateShape = None) -> Set[CausetEvent]:\r\n '''\r\n Creates a fixed number of new events by sprinkling into `shape` \r\n (by default the entire embedding region).\r\n '''\r\n if count < 0:\r\n raise ValueError('The sprinkle cardinality has to ' +\r\n 'be a non-negative integer.')\r\n self._intensity += float(count)\r\n if shape is None:\r\n shape = self.Shape\r\n coords: np.ndarray = self._sprinkle_coords(count, shape, rng)\r\n return super().create(coords)\r\n\r\n def intensify(self, intensity: float, rng=default_rng(),\r\n shape: CoordinateShape = None) -> Set[CausetEvent]:\r\n '''\r\n Creates an expected number of new events by sprinkling into \r\n `shape` (by default the entire embedding region). The expected \r\n number is determined by the Poisson distribution with the \r\n given `intensity` parameter.\r\n '''\r\n if intensity < 0.0:\r\n raise ValueError('The intensity parameter has to ' +\r\n 'be a non-negative float.')\r\n self._intensity += intensity\r\n count: int = int(rng.poisson(lam=intensity))\r\n if shape is None:\r\n shape = self.Shape\r\n coords: np.ndarray = self._sprinkle_coords(count, shape, rng)\r\n return super().create(coords)\r\n\r\n def create(self, coords: Union[Iterable[List[float]],\r\n Iterable[np.ndarray],\r\n np.ndarray],\r\n labelFormat: str = None,\r\n relate: bool = True) -> Set[CausetEvent]:\r\n card_old: float = float(self.Card)\r\n eventSet: Set[CausetEvent] = super().create(\r\n coords, labelFormat, relate)\r\n self._intensity += (float(self.Card) - card_old)\r\n return eventSet\r\n\r\n def add(self, eventSet: Iterable, unlink: bool = False) -> None:\r\n card_old: float = float(self.Card)\r\n super().add(eventSet, unlink)\r\n self._intensity += (float(self.Card) - card_old)\r\n\r\n def discard(self, eventSet: Iterable, unlink: bool = False) -> None:\r\n card_old: float = float(self.Card)\r\n super().discard(eventSet, unlink)\r\n self._intensity *= (float(self.Card) / card_old)\r\n"
]
| [
[
"numpy.square",
"numpy.empty",
"numpy.random.default_rng"
]
]
|