repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---|
cicicici/deeptensor | [
"efcd7b9ca2d758cb2461b64fa5ba1268685e4dab"
] | [
"deeptensor/model/timm/dla.py"
] | [
"\"\"\" Deep Layer Aggregation and DLA w/ Res2Net\nDLA original adapted from Official Pytorch impl at:\nDLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484\n\nRes2Net additions from: https://github.com/gasvn/Res2Net/\nRes2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169\n\"\"\"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .registry import register_model\nfrom .helpers import load_pretrained\nfrom .adaptive_avgmax_pool import SelectAdaptivePool2d\nfrom .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\n\n\n__all__ = ['DLA']\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bilinear',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'base_layer.0', 'classifier': 'fc',\n **kwargs\n }\n\n\ndefault_cfgs = {\n 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'),\n 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'),\n 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'),\n 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'),\n 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'),\n 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'),\n 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'),\n 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'),\n 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'),\n 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'),\n 'dla60_res2net': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'),\n 'dla60_res2next': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'),\n}\n\n\nclass DlaBasic(nn.Module):\n \"\"\"DLA Basic\"\"\"\n def __init__(self, inplanes, planes, stride=1, dilation=1, **_):\n super(DlaBasic, self).__init__()\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaBottleneck(nn.Module):\n \"\"\"DLA/DLA-X Bottleneck\"\"\"\n expansion = 2\n\n def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):\n super(DlaBottleneck, self).__init__()\n self.stride = stride\n mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)\n mid_planes = mid_planes // self.expansion\n\n self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(mid_planes)\n self.conv2 = nn.Conv2d(\n mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation,\n bias=False, dilation=dilation, groups=cardinality)\n self.bn2 = 
nn.BatchNorm2d(mid_planes)\n self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(outplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaBottle2neck(nn.Module):\n \"\"\" Res2Net/Res2NeXT DLA Bottleneck\n Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py\n \"\"\"\n expansion = 2\n\n def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):\n super(DlaBottle2neck, self).__init__()\n self.is_first = stride > 1\n self.scale = scale\n mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)\n mid_planes = mid_planes // self.expansion\n self.width = mid_planes\n\n self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(mid_planes * scale)\n\n num_scale_convs = max(1, scale - 1)\n convs = []\n bns = []\n for _ in range(num_scale_convs):\n convs.append(nn.Conv2d(\n mid_planes, mid_planes, kernel_size=3, stride=stride,\n padding=dilation, dilation=dilation, groups=cardinality, bias=False))\n bns.append(nn.BatchNorm2d(mid_planes))\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n if self.is_first:\n self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)\n\n self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(outplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n spx = torch.split(out, self.width, 1)\n spo = []\n for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):\n sp = spx[i] if i == 0 or self.is_first else sp + spx[i]\n sp = conv(sp)\n sp = bn(sp)\n sp = self.relu(sp)\n spo.append(sp)\n if self.scale > 1 :\n spo.append(self.pool(spx[-1]) if self.is_first else spx[-1])\n out = torch.cat(spo, 1)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaRoot(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, residual):\n super(DlaRoot, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n self.residual = residual\n\n def forward(self, *x):\n children = x\n x = self.conv(torch.cat(x, 1))\n x = self.bn(x)\n if self.residual:\n x += children[0]\n x = self.relu(x)\n\n return x\n\n\nclass DlaTree(nn.Module):\n def __init__(self, levels, block, in_channels, out_channels, stride=1,\n dilation=1, cardinality=1, base_width=64,\n level_root=False, root_dim=0, root_kernel_size=1, root_residual=False):\n super(DlaTree, self).__init__()\n if root_dim == 0:\n root_dim = 2 * out_channels\n if level_root:\n root_dim += in_channels\n cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)\n if levels == 1:\n self.tree1 = block(in_channels, out_channels, stride, **cargs)\n self.tree2 = block(out_channels, out_channels, 1, **cargs)\n else:\n cargs.update(dict(root_kernel_size=root_kernel_size, root_residual=root_residual))\n 
self.tree1 = DlaTree(\n levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs)\n self.tree2 = DlaTree(\n levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs)\n if levels == 1:\n self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_residual)\n self.level_root = level_root\n self.root_dim = root_dim\n self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else None\n self.project = None\n if in_channels != out_channels:\n self.project = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n self.levels = levels\n\n def forward(self, x, residual=None, children=None):\n children = [] if children is None else children\n bottom = self.downsample(x) if self.downsample else x\n residual = self.project(bottom) if self.project else bottom\n if self.level_root:\n children.append(bottom)\n x1 = self.tree1(x, residual)\n if self.levels == 1:\n x2 = self.tree2(x1)\n x = self.root(x2, x1, *children)\n else:\n children.append(x1)\n x = self.tree2(x1, children=children)\n return x\n\n\nclass DLA(nn.Module):\n def __init__(self, levels, channels, num_classes=1000, in_chans=3, cardinality=1, base_width=64,\n block=DlaBottle2neck, residual_root=False, linear_root=False,\n drop_rate=0.0, global_pool='avg'):\n super(DLA, self).__init__()\n self.channels = channels\n self.num_classes = num_classes\n self.cardinality = cardinality\n self.base_width = base_width\n self.drop_rate = drop_rate\n\n self.base_layer = nn.Sequential(\n nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),\n nn.BatchNorm2d(channels[0]),\n nn.ReLU(inplace=True))\n self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])\n self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)\n cargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root)\n self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs)\n self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs)\n self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs)\n self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs)\n\n self.num_features = channels[-1]\n self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)\n self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):\n modules = []\n for i in range(convs):\n modules.extend([\n nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1,\n padding=dilation, bias=False, dilation=dilation),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)])\n inplanes = planes\n return nn.Sequential(*modules)\n\n def get_classifier(self):\n return self.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.num_classes = num_classes\n self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)\n if num_classes:\n self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)\n else:\n self.fc = None\n\n def forward_features(self, x):\n x = self.base_layer(x)\n x = self.level0(x)\n x = self.level1(x)\n x = self.level2(x)\n x = self.level3(x)\n x = self.level4(x)\n x = self.level5(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.global_pool(x)\n if self.drop_rate > 0.:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = self.fc(x)\n return x.flatten(1)\n\n\n@register_model\ndef dla60_res2net(pretrained=None, num_classes=1000, in_chans=3, **kwargs):\n default_cfg = default_cfgs['dla60_res2net']\n model = DLA(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),\n block=DlaBottle2neck, cardinality=1, base_width=28,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60_res2next(pretrained=None, num_classes=1000, in_chans=3, **kwargs):\n default_cfg = default_cfgs['dla60_res2next']\n model = DLA(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),\n block=DlaBottle2neck, cardinality=8, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla34(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-34\n default_cfg = default_cfgs['dla34']\n model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=DlaBasic, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla46_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-46-C\n default_cfg = default_cfgs['dla46_c']\n model = DLA(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla46x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-46-C\n default_cfg = default_cfgs['dla46x_c']\n model = DLA(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-60-C\n default_cfg = default_cfgs['dla60x_c']\n model = DLA([1, 1, 1, 
2, 3, 1], [16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-60\n default_cfg = default_cfgs['dla60']\n model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60x(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-60\n default_cfg = default_cfgs['dla60x']\n model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-102\n default_cfg = default_cfgs['dla102']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102x(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-102\n default_cfg = default_cfgs['dla102x']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=32, base_width=4, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102x2(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-102 64\n default_cfg = default_cfgs['dla102x2']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=64, base_width=4, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla169(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-169\n default_cfg = default_cfgs['dla169']\n model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n"
] | [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
ben-hayes/torchcrepe | [
"9179f110313d1a6f046351fbad1ae72a4f0fd6fa"
] | [
"tests/test_threshold.py"
] | [
"import torch\n\nimport torchcrepe\n\n\n###############################################################################\n# Test threshold.py\n###############################################################################\n\n\ndef test_at():\n \"\"\"Test torchcrepe.threshold.At\"\"\"\n input_pitch = torch.tensor([100., 110., 120., 130., 140.])\n harmonicity = torch.tensor([.19, .22, .25, .17, .30])\n\n # Perform thresholding\n output_pitch = torchcrepe.threshold.At(.20)(input_pitch, harmonicity)\n\n # Ensure thresholding is not in-place\n assert not (input_pitch == output_pitch).all()\n\n # Ensure certain frames are marked as unvoiced\n isnan = torch.isnan(output_pitch)\n assert isnan[0] and isnan[3]\n assert not isnan[1] and not isnan[2] and not isnan[4]\n"
] | [
[
"torch.tensor",
"torch.isnan"
]
] |
ChrisBremer/can-scrapers | [
"a91123368f8473a2778c4efcc40855b2fd631306"
] | [
"db/bin/populate_locations.py"
] | [
"import pandas as pd\nfrom can_tools.scrapers.uscensus.geo import USGeoBaseAPI\n\nd = USGeoBaseAPI(\"state\")\ndf_s = d.get()\ndf_s[\"location_type\"] = 2\n\nd = USGeoBaseAPI(\"county\")\ndf_c = d.get()\ndf_c[\"location_type\"] = 1\n\ndf = pd.DataFrame(pd.concat([df_s, df_c], ignore_index=True))\ndf[\"fullname\"] = df[\"fullname\"].str.replace(\"'\", \"''\")\ndf[\"name\"] = df[\"name\"].str.replace(\"'\", \"''\")\n\nfmt = \"({location}, {location_type}, '{state}', {area}, {latitude}, {longitude}, '{fullname}', '{name}')\"\n\ninserts = \", \".join(fmt.format(**x) for _, x in df.iterrows())\n\noutput = (\n \"\"\"\nINSERT INTO meta.locations (location, location_type, state, area, latitude, longitude, fullname, name)\nVALUES\n\"\"\"\n + \" \"\n + inserts\n + \";\"\n)\n\nprint(output)\n"
] | [
[
"pandas.concat"
]
] |
mjuric/sbpy | [
"082ad903cb9b21ae961e4c72b666a8242539382a"
] | [
"sbpy/spectroscopy/tests/test_sources.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport sys\nimport pytest\nimport numpy as np\nimport astropy.units as u\nfrom astropy.tests.helper import remote_data\nfrom astropy.modeling.blackbody import blackbody_nu, blackbody_lambda\nimport synphot\nfrom .. import sources\nfrom ..sources import (BlackbodySource, SinglePointSpectrumError,\n SpectralSource, SynphotRequired)\nfrom ... import bib, units\nfrom ...photometry import bandpass\n\nV = bandpass('johnson v')\nI = bandpass('cousins i')\n\n\nclass Star(SpectralSource):\n def __init__(self):\n super().__init__(synphot.SourceSpectrum(\n synphot.ConstFlux1D, amplitude=1 * u.W / u.m**2 / u.um))\n\n\nclass TestSpectralSource:\n def test_init_error(self, monkeypatch):\n with pytest.raises(SynphotRequired):\n monkeypatch.setattr(sources, 'synphot', None)\n Star()\n\n @pytest.mark.parametrize('wfb, interpolate', (\n ([V], False),\n ([1] * u.um, True),\n ([1, 2, 3] * u.um, False)\n ))\n def test_observe(self, wfb, interpolate):\n s = Star()\n fluxd = s.observe(wfb, unit='W/(m2 um)', interpolate=interpolate)\n assert np.isclose(fluxd.value, 1.0).all()\n\n def test_observe_bad_wfb(self):\n with pytest.raises(TypeError):\n s = Star()\n s.observe(np.arange(5))\n\n @pytest.mark.parametrize('wfb, unit, atol', (\n ((V, I), u.ABmag, 0.006),\n ((600 * u.nm, 750 * u.nm), u.ABmag, 1e-6),\n ((750 * u.GHz, 600 * u.GHz), u.ABmag, 1e-6),\n ))\n def test_color_index(self, wfb, unit, atol):\n s = Star()\n eff_wave, ci = s.color_index(wfb, unit)\n test = -5 * np.log10((eff_wave.min() / eff_wave.max()).value)\n assert np.isclose(ci.value, test, atol=atol)\n\n def test_color_index_typeerror(self):\n s = Star()\n with pytest.raises(TypeError):\n s.color_index(np.arange(2), unit=u.ABmag)\n\n\nclass TestBlackbodySource:\n @pytest.mark.parametrize('T', (\n 300, 300 * u.K\n ))\n def test_init_temperature(self, T):\n BB = BlackbodySource(T)\n assert BB.T.value == 300\n\n def test_init_temperature_error(self):\n with pytest.raises(TypeError):\n BlackbodySource()\n\n def test_repr(self):\n BB = BlackbodySource(278)\n assert repr(BB) == '<BlackbodySource: T=278.0 K>'\n\n @pytest.mark.parametrize('B', (blackbody_nu, blackbody_lambda))\n def test_call(self, B):\n w = np.logspace(-0.5, 3) * u.um\n f = B(w, 300 * u.K) * np.pi * u.sr\n BB = BlackbodySource(300 * u.K)\n test = BB(w, unit=f.unit).value\n assert np.allclose(test, f.value)\n"
] | [
[
"numpy.allclose",
"numpy.arange",
"numpy.isclose",
"numpy.logspace"
]
] |
phamduyhk/signate_student_cup_2020 | [
"19e158b08a86f2df8e4ee45445169ae396c91409"
] | [
"utils/dataloader.py"
] | [
"# coding: utf-8\nimport glob\nimport os\nimport io\nimport string\nimport re\nimport random\nimport spacy\nimport torchtext\nfrom torchtext.vocab import Vectors\nimport pandas as pd\nimport torch\n\n\nclass Preprocessing():\n def __init__(self):\n pass\n\n def get_data(self, path, train_file, test_file, vectors, max_length, batch_size):\n \"\"\"\n :param path (str) : path to train and test data\n :param train_file (str) : train data file name (Except csv)\n :param test_file (str) : test data file name (Except csv)\n :param vector_list (str) : vector list file path\n :param max_length (int) : max length of output text\n :param batch_size (int) : batch size\n\n :return:\n train_dl\n val_dl\n test_dl\n TEXT: \"comment_text\"\n\n :detail:\n LABEL<1,2,3,4,5,6>: \"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"\n \"\"\"\n # データを読み込んだときに、読み込んだ内容に対して行う処理を定義します\n TEXT = torchtext.data.Field(sequential=True, tokenize=self.tokenizer_with_preprocessing, use_vocab=True,\n lower=True, include_lengths=True, batch_first=True, fix_length=max_length,\n init_token=\"<cls>\", eos_token=\"<eos>\")\n LABEL = torchtext.data.Field(sequential=False, use_vocab=False)\n\n temp_path = self.reformat_csv_header(\n path=path, train_file=train_file, test_file=test_file)\n\n train_val_ds, test_ds = torchtext.data.TabularDataset.splits(\n path=temp_path, train=train_file,\n test=test_file, format='csv',\n fields=[('Text', TEXT), ('jobflag', LABEL)])\n\n train_ds, val_ds = train_val_ds.split(\n split_ratio=0.7, random_state=random.seed(2395))\n\n # torchtextで単語ベクトルとして英語学習済みモデルを読み込みます\n english_fasttext_vectors = Vectors(name=vectors,cache=path)\n\n # ベクトル化したバージョンのボキャブラリーを作成します\n TEXT.build_vocab(\n train_ds, vectors=english_fasttext_vectors, min_freq=10)\n\n # DataLoaderを作成します(torchtextの文脈では単純にiteraterと呼ばれています)\n train_dl = torchtext.data.Iterator(\n train_ds, batch_size=batch_size, train=True)\n\n val_dl = torchtext.data.Iterator(\n val_ds, batch_size=batch_size, train=False, sort=False)\n\n test_dl = torchtext.data.Iterator(\n test_ds, batch_size=batch_size, train=False, sort=False)\n\n return train_dl, val_dl, test_dl, TEXT\n\n def reformat_csv_header(self, path, train_file, test_file):\n \"\"\"\n remove index col in csv file\n :arg\n :param path (str) : path to train and test data\n :param train_file (str) : train data file name (Except csv)\n :param test_file (str) : test data file name (Except csv)\n\n Return:\n temp path\n \"\"\"\n\n \"\"\"\n \"id\",\"comment_text\",\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"\n \"\"\"\n\n train = pd.read_csv(os.path.join(path, train_file))\n test = pd.read_csv(os.path.join(path, test_file))\n train = train.drop('id', axis=1)\n test = test.drop('id', axis=1)\n for label in [\"jobflag\"]:\n test[label] = pd.Series(0, index=test.index)\n temp_path = os.path.join(path, \"temp\")\n if not os.path.isdir(temp_path):\n os.mkdir(temp_path)\n train.to_csv(os.path.join(temp_path, train_file),\n index=False, header=False)\n test.to_csv(os.path.join(temp_path, test_file),\n index=False, header=False)\n return temp_path\n\n @staticmethod\n def preprocessing_text(text):\n # 改行コードを消去\n text = re.sub('¥n', '', text)\n\n # 数字を消去\n for num in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:\n text = re.sub(num, '', text)\n\n # カンマ、ピリオド以外の記号をスペースに置換\n for p in string.punctuation:\n if (p == \".\") or (p == \",\"):\n continue\n else:\n text = text.replace(p, \" \")\n\n # ピリオドなどの前後にはスペースを入れておく\n text = text.replace(\".\", \" . 
\")\n text = text.replace(\",\", \" , \")\n return text\n\n # 分かち書き(今回はデータが英語で、簡易的にスペースで区切る)\n @staticmethod\n def tokenizer_punctuation(text):\n return text.strip().split()\n\n # 前処理と分かち書きをまとめた関数を定義\n def tokenizer_with_preprocessing(self, text):\n text = self.preprocessing_text(text)\n ret = self.tokenizer_punctuation(text)\n return ret\n\n\nif __name__ == '__main__':\n path = \"../data/\"\n train_file = \"train.csv\"\n test_file = \"test.csv\"\n vector_list = \"../data/wiki-news-300d-1M.vec\"\n instance = Preprocessing()\n train_dl, val_dl, test_dl, TEXT = instance.get_data(path=path, train_file=train_file, test_file=test_file,\n vectors=vector_list, max_length=256,\n batch_size=1280)\n"
] | [
[
"pandas.Series"
]
] |
RogerFrigola/probability | [
"cfb507b7ede2c1ba753bffc5ea827b9c97c37bdc"
] | [
"tensorflow_probability/python/distributions/sample_stats.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functions for computing statistics of samples.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal import distribution_util as util\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import control_flow_ops\n\n__all__ = [\n \"auto_correlation\",\n \"percentile\",\n]\n\n\n# TODO(langmore) Write separate versions of this for real/complex dtype, taking\n# advantage of optimized real-fft ops.\ndef auto_correlation(\n x,\n axis=-1,\n max_lags=None,\n center=True,\n normalize=True,\n name=\"auto_correlation\"):\n \"\"\"Auto correlation along one axis.\n\n Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation\n `RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)\n\n ```\n RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },\n W[n] := (X[n] - MU) / S,\n MU := E{ X[0] },\n S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.\n ```\n\n This function takes the viewpoint that `x` is (along one axis) a finite\n sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an\n estimate of `RXX[m]` as follows:\n\n After extending `x` from length `L` to `inf` by zero padding, the auto\n correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as\n\n ```\n rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),\n w[n] := (x[n] - mu) / s,\n mu := L**-1 sum_n x[n],\n s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)\n ```\n\n The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users\n often set `max_lags` small enough so that the entire output is meaningful.\n\n Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by\n `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation\n contains a slight bias, which goes to zero as `len(x) - m --> infinity`.\n\n Args:\n x: `float32` or `complex64` `Tensor`.\n axis: Python `int`. The axis number along which to compute correlation.\n Other dimensions index different batch members.\n max_lags: Positive `int` tensor. The maximum value of `m` to consider\n (in equation above). If `max_lags >= x.shape[axis]`, we effectively\n re-set `max_lags` to `x.shape[axis] - 1`.\n center: Python `bool`. If `False`, do not subtract the mean estimate `mu`\n from `x[n]` when forming `w[n]`.\n normalize: Python `bool`. If `False`, do not divide by the variance\n estimate `s**2` when forming `w[n]`.\n name: `String` name to prepend to created ops.\n\n Returns:\n `rxx`: `Tensor` of same `dtype` as `x`. 
`rxx.shape[i] = x.shape[i]` for\n `i != axis`, and `rxx.shape[axis] = max_lags + 1`.\n\n Raises:\n TypeError: If `x` is not a supported type.\n \"\"\"\n # Implementation details:\n # Extend length N / 2 1-D array x to length N by zero padding onto the end.\n # Then, set\n # F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.\n # It is not hard to see that\n # F[x]_k Conj(F[x]_k) = F[R]_k, where\n # R_m := sum_n x_n Conj(x_{(n - m) mod N}).\n # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].\n\n # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT\n # based version of estimating RXX.\n # Note that this is a special case of the Wiener-Khinchin Theorem.\n with tf.name_scope(name, values=[x]):\n x = tf.convert_to_tensor(x, name=\"x\")\n\n # Rotate dimensions of x in order to put axis at the rightmost dim.\n # FFT op requires this.\n rank = util.prefer_static_rank(x)\n if axis < 0:\n axis = rank + axis\n shift = rank - 1 - axis\n # Suppose x.shape[axis] = T, so there are T \"time\" steps.\n # ==> x_rotated.shape = B + [T],\n # where B is x_rotated's batch shape.\n x_rotated = util.rotate_transpose(x, shift)\n\n if center:\n x_rotated -= tf.reduce_mean(x_rotated, axis=-1, keepdims=True)\n\n # x_len = N / 2 from above explanation. The length of x along axis.\n # Get a value for x_len that works in all cases.\n x_len = util.prefer_static_shape(x_rotated)[-1]\n\n # TODO(langmore) Investigate whether this zero padding helps or hurts. At\n # the moment is necessary so that all FFT implementations work.\n # Zero pad to the next power of 2 greater than 2 * x_len, which equals\n # 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).\n x_len_float64 = tf.cast(x_len, np.float64)\n target_length = tf.pow(\n np.float64(2.), tf.ceil(tf.log(x_len_float64 * 2) / np.log(2.)))\n pad_length = tf.cast(target_length - x_len_float64, np.int32)\n\n # We should have:\n # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]\n # = B + [T + pad_length]\n x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)\n\n dtype = x.dtype\n if not dtype.is_complex:\n if not dtype.is_floating:\n raise TypeError(\"Argument x must have either float or complex dtype\"\n \" found: {}\".format(dtype))\n x_rotated_pad = tf.complex(x_rotated_pad,\n dtype.real_dtype.as_numpy_dtype(0.))\n\n # Autocorrelation is IFFT of power-spectral density (up to some scaling).\n fft_x_rotated_pad = tf.fft(x_rotated_pad)\n spectral_density = fft_x_rotated_pad * tf.conj(fft_x_rotated_pad)\n # shifted_product is R[m] from above detailed explanation.\n # It is the inner product sum_n X[n] * Conj(X[n - m]).\n shifted_product = tf.ifft(spectral_density)\n\n # Cast back to real-valued if x was real to begin with.\n shifted_product = tf.cast(shifted_product, dtype)\n\n # Figure out if we can deduce the final static shape, and set max_lags.\n # Use x_rotated as a reference, because it has the time dimension in the far\n # right, and was created before we performed all sorts of crazy shape\n # manipulations.\n know_static_shape = True\n if not x_rotated.shape.is_fully_defined():\n know_static_shape = False\n if max_lags is None:\n max_lags = x_len - 1\n else:\n max_lags = tf.convert_to_tensor(max_lags, name=\"max_lags\")\n max_lags_ = tensor_util.constant_value(max_lags)\n if max_lags_ is None or not know_static_shape:\n know_static_shape = False\n max_lags = tf.minimum(x_len - 1, max_lags)\n else:\n max_lags = min(x_len - 1, max_lags_)\n\n # Chop off the padding.\n # We allow users 
to provide a huge max_lags, but cut it off here.\n # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]\n shifted_product_chopped = shifted_product[..., :max_lags + 1]\n\n # If possible, set shape.\n if know_static_shape:\n chopped_shape = x_rotated.shape.as_list()\n chopped_shape[-1] = min(x_len, max_lags + 1)\n shifted_product_chopped.set_shape(chopped_shape)\n\n # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The\n # other terms were zeros arising only due to zero padding.\n # `denominator = (N / 2 - m)` (defined below) is the proper term to\n # divide by to make this an unbiased estimate of the expectation\n # E[X[n] Conj(X[n - m])].\n x_len = tf.cast(x_len, dtype.real_dtype)\n max_lags = tf.cast(max_lags, dtype.real_dtype)\n denominator = x_len - tf.range(0., max_lags + 1.)\n denominator = tf.cast(denominator, dtype)\n shifted_product_rotated = shifted_product_chopped / denominator\n\n if normalize:\n shifted_product_rotated /= shifted_product_rotated[..., :1]\n\n # Transpose dimensions back to those of x.\n return util.rotate_transpose(shifted_product_rotated, -shift)\n\n\n# TODO(langmore) To make equivalent to numpy.percentile:\n# Make work with a sequence of floats or single float for 'q'.\n# Make work with \"linear\", \"midpoint\" interpolation. (linear should be default)\ndef percentile(x,\n q,\n axis=None,\n interpolation=None,\n keep_dims=False,\n validate_args=False,\n name=None):\n \"\"\"Compute the `q`-th percentile of `x`.\n\n Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the\n way from the minimum to the maximum in a sorted copy of `x`.\n\n The values and distances of the two nearest neighbors as well as the\n `interpolation` parameter will determine the percentile if the normalized\n ranking does not match the location of `q` exactly.\n\n This function is the same as the median if `q = 50`, the same as the minimum\n if `q = 0` and the same as the maximum if `q = 100`.\n\n\n ```python\n # Get 30th percentile with default ('nearest') interpolation.\n x = [1., 2., 3., 4.]\n percentile(x, q=30.)\n ==> 2.0\n\n # Get 30th percentile with 'lower' interpolation\n x = [1., 2., 3., 4.]\n percentile(x, q=30., interpolation='lower')\n ==> 1.0\n\n # Get 100th percentile (maximum). By default, this is computed over every dim\n x = [[1., 2.]\n [3., 4.]]\n percentile(x, q=100.)\n ==> 4.0\n\n # Treat the leading dim as indexing samples, and find the 100th quantile (max)\n # over all such samples.\n x = [[1., 2.]\n [3., 4.]]\n percentile(x, q=100., axis=[0])\n ==> [3., 4.]\n ```\n\n Compare to `numpy.percentile`.\n\n Args:\n x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,\n `x` must have statically known number of dimensions.\n q: Scalar `Tensor` in `[0, 100]`. The percentile.\n axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.\n The axis that hold independent samples over which to return the desired\n percentile. If `None` (the default), treat every dimension as a sample\n dimension, returning a scalar.\n interpolation : {\"lower\", \"higher\", \"nearest\"}. Default: \"nearest\"\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points `i < j`:\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j`, whichever is nearest.\n keep_dims: Python `bool`. 
If `True`, the last dimension is kept with size 1\n If `False`, the last dimension is removed from the output shape.\n validate_args: Whether to add runtime checks of argument validity.\n If False, and arguments are incorrect, correct behavior is not guaranteed.\n name: A Python string name to give this `Op`. Default is \"percentile\"\n\n Returns:\n A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if\n `axis` is `None`, a scalar.\n\n Raises:\n ValueError: If argument 'interpolation' is not an allowed type.\n \"\"\"\n name = name or \"percentile\"\n allowed_interpolations = {\"lower\", \"higher\", \"nearest\"}\n\n if interpolation is None:\n interpolation = \"nearest\"\n else:\n if interpolation not in allowed_interpolations:\n raise ValueError(\"Argument 'interpolation' must be in %s. Found %s\" %\n (allowed_interpolations, interpolation))\n\n with tf.name_scope(name, values=[x, q]):\n x = tf.convert_to_tensor(x, name=\"x\")\n # Double is needed here and below, else we get the wrong index if the array\n # is huge along axis.\n q = tf.to_double(q, name=\"q\")\n _get_static_ndims(q, expect_ndims=0)\n\n if validate_args:\n q = control_flow_ops.with_dependencies([\n tf.assert_rank(q, 0),\n tf.assert_greater_equal(q, tf.to_double(0.)),\n tf.assert_less_equal(q, tf.to_double(100.))\n ], q)\n\n if axis is None:\n y = tf.reshape(x, [-1])\n else:\n axis = tf.convert_to_tensor(axis, name=\"axis\")\n tf.assert_integer(axis)\n axis_ndims = _get_static_ndims(\n axis, expect_static=True, expect_ndims_no_more_than=1)\n axis_const = tensor_util.constant_value(axis)\n if axis_const is None:\n raise ValueError(\n \"Expected argument 'axis' to be statically available. Found: %s\" %\n axis)\n axis = axis_const\n if axis_ndims == 0:\n axis = [axis]\n axis = [int(a) for a in axis]\n x_ndims = _get_static_ndims(\n x, expect_static=True, expect_ndims_at_least=1)\n axis = _make_static_axis_non_negative(axis, x_ndims)\n y = _move_dims_to_flat_end(x, axis, x_ndims)\n\n frac_at_q_or_above = 1. - q / 100.\n d = tf.to_double(tf.shape(y)[-1])\n\n if interpolation == \"lower\":\n index = tf.ceil((d - 1) * frac_at_q_or_above)\n elif interpolation == \"higher\":\n index = tf.floor((d - 1) * frac_at_q_or_above)\n elif interpolation == \"nearest\":\n index = tf.round((d - 1) * frac_at_q_or_above)\n\n # If d is gigantic, then we would have d == d - 1, even in double... 
So\n # let's use max/min to avoid out of bounds errors.\n d = tf.shape(y)[-1]\n # d - 1 will be distinct from d in int32.\n index = tf.clip_by_value(tf.to_int32(index), 0, d - 1)\n\n # Sort everything, not just the top 'k' entries, which allows multiple calls\n # to sort only once (under the hood) and use CSE.\n sorted_y = _sort_tensor(y)\n\n # result.shape = B\n result = sorted_y[..., index]\n result.set_shape(y.get_shape()[:-1])\n\n if keep_dims:\n if axis is None:\n # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).\n ones_vec = tf.ones(shape=[_get_best_effort_ndims(x)], dtype=tf.int32)\n result *= tf.ones(ones_vec, dtype=x.dtype)\n else:\n result = _insert_back_keep_dims(result, axis)\n\n return result\n\n\ndef _get_static_ndims(x,\n expect_static=False,\n expect_ndims=None,\n expect_ndims_no_more_than=None,\n expect_ndims_at_least=None):\n \"\"\"Get static number of dimensions and assert that some expectations are met.\n\n This function returns the number of dimensions \"ndims\" of x, as a Python int.\n\n The optional expect arguments are used to check the ndims of x, but this is\n only done if the static ndims of x is not None.\n\n Args:\n x: A Tensor.\n expect_static: Expect `x` to have statically defined `ndims`.\n expect_ndims: Optional Python integer. If provided, assert that x has\n number of dimensions equal to this.\n expect_ndims_no_more_than: Optional Python integer. If provided, assert\n that x has no more than this many dimensions.\n expect_ndims_at_least: Optional Python integer. If provided, assert that\n x has at least this many dimensions.\n\n Returns:\n ndims: A Python integer.\n\n Raises:\n ValueError: If any of the expectations above are violated.\n \"\"\"\n ndims = x.get_shape().ndims\n if ndims is None:\n shape_const = tensor_util.constant_value(tf.shape(x))\n if shape_const is not None:\n ndims = shape_const.ndim\n\n if ndims is None:\n if expect_static:\n raise ValueError(\n \"Expected argument 'x' to have statically defined 'ndims'. Found: \" %\n x)\n return\n\n if expect_ndims is not None:\n ndims_message = (\"Expected argument 'x' to have ndims %s. Found tensor %s\"\n % (expect_ndims, x))\n if ndims != expect_ndims:\n raise ValueError(ndims_message)\n\n if expect_ndims_at_least is not None:\n ndims_at_least_message = (\n \"Expected argument 'x' to have ndims >= %d. Found tensor %s\" % (\n expect_ndims_at_least, x))\n if ndims < expect_ndims_at_least:\n raise ValueError(ndims_at_least_message)\n\n if expect_ndims_no_more_than is not None:\n ndims_no_more_than_message = (\n \"Expected argument 'x' to have ndims <= %d. Found tensor %s\" % (\n expect_ndims_no_more_than, x))\n if ndims > expect_ndims_no_more_than:\n raise ValueError(ndims_no_more_than_message)\n\n return ndims\n\n\ndef _get_best_effort_ndims(x,\n expect_ndims=None,\n expect_ndims_at_least=None,\n expect_ndims_no_more_than=None):\n \"\"\"Get static ndims if possible. 
Fallback on `tf.rank(x)`.\"\"\"\n ndims_static = _get_static_ndims(\n x,\n expect_ndims=expect_ndims,\n expect_ndims_at_least=expect_ndims_at_least,\n expect_ndims_no_more_than=expect_ndims_no_more_than)\n if ndims_static is not None:\n return ndims_static\n return tf.rank(x)\n\n\ndef _insert_back_keep_dims(x, axis):\n \"\"\"Insert the dims in `axis` back as singletons after being removed.\n\n Args:\n x: `Tensor`.\n axis: Python list of integers.\n\n Returns:\n `Tensor` with same values as `x`, but additional singleton dimensions.\n \"\"\"\n for i in sorted(axis):\n x = tf.expand_dims(x, axis=i)\n return x\n\n\ndef _make_static_axis_non_negative(axis, ndims):\n \"\"\"Convert possibly negatively indexed axis to non-negative.\n\n Args:\n axis: Iterable over Python integers.\n ndims: Number of dimensions into which axis indexes.\n\n Returns:\n A list of non-negative Python integers.\n\n Raises:\n ValueError: If values in `axis` are too big/small to index into `ndims`.\n \"\"\"\n non_negative_axis = []\n for d in axis:\n if d >= 0:\n if d >= ndims:\n raise ValueError(\"dim %d not in the interval [0, %d].\" % (d, ndims - 1))\n non_negative_axis.append(d)\n else:\n if d < -1 * ndims:\n raise ValueError(\n \"Negatively indexed dim %d not in the interval [-%d, -1]\" % (d,\n ndims))\n non_negative_axis.append(ndims + d)\n return non_negative_axis\n\n\ndef _move_dims_to_flat_end(x, axis, x_ndims):\n \"\"\"Move dims corresponding to `axis` in `x` to the end, then flatten.\n\n Args:\n x: `Tensor` with shape `[B0,B1,...,Bb]`.\n axis: Python list of indices into dimensions of `x`.\n x_ndims: Python integer holding number of dimensions in `x`.\n\n Returns:\n `Tensor` with value from `x` and dims in `axis` moved to end into one single\n dimension.\n \"\"\"\n # Suppose x.shape = [a, b, c, d]\n # Suppose axis = [1, 3]\n\n # front_dims = [0, 2] in example above.\n front_dims = sorted(set(range(x_ndims)).difference(axis))\n # x_permed.shape = [a, c, b, d]\n x_permed = tf.transpose(x, perm=front_dims + list(axis))\n\n if x.get_shape().is_fully_defined():\n x_shape = x.get_shape().as_list()\n # front_shape = [a, c], end_shape = [b * d]\n front_shape = [x_shape[i] for i in front_dims]\n end_shape = [np.prod([x_shape[i] for i in axis])]\n full_shape = front_shape + end_shape\n else:\n front_shape = tf.shape(x_permed)[:x_ndims - len(axis)]\n end_shape = [-1]\n full_shape = tf.concat([front_shape, end_shape], axis=0)\n return tf.reshape(x_permed, shape=full_shape)\n\n\ndef _sort_tensor(tensor):\n \"\"\"Use `top_k` to sort a `Tensor` along the last dimension.\"\"\"\n sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(tensor)[-1])\n return sorted_\n"
] | [
[
"tensorflow.conj",
"tensorflow.assert_rank",
"tensorflow.ones",
"tensorflow.reshape",
"tensorflow.assert_integer",
"tensorflow.ceil",
"tensorflow.cast",
"tensorflow.rank",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.shape",
"tensorflow.concat",
"numpy.log",
"tensorflow.to_double",
"numpy.prod",
"tensorflow.floor",
"tensorflow.range",
"tensorflow.minimum",
"tensorflow.expand_dims",
"tensorflow.round",
"numpy.float64",
"tensorflow.log",
"tensorflow.name_scope",
"tensorflow.ifft",
"tensorflow.to_int32",
"tensorflow.convert_to_tensor",
"tensorflow.fft",
"tensorflow.reduce_mean"
]
] |
Indraos/EconML | [
"056fb30b49db5485ff23bb38cf49cd29bde783c9"
] | [
"econml/dr/_drlearner.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\nDoubly Robust Learner. The method uses the doubly robust correction to construct doubly\nrobust estimates of all the potential outcomes of each samples. Then estimates a CATE model\nby regressing the potential outcome differences on the heterogeneity features X.\n\nReferences\n----------\n\nDylan Foster, Vasilis Syrgkanis (2019).\n Orthogonal Statistical Learning.\n ACM Conference on Learning Theory. https://arxiv.org/abs/1901.09036\n\nRobins, J.M., Rotnitzky, A., and Zhao, L.P. (1994).\n Estimation of regression coefficients when some regressors are not always observed.\n Journal of the American Statistical Association 89,846–866.\n\nBang, H. and Robins, J.M. (2005).\n Doubly robust estimation in missing data and causal inference models.\n Biometrics 61,962–972.\n\nTsiatis AA (2006).\n Semiparametric Theory and Missing Data.\n New York: Springer; 2006.\n\n.. testcode::\n :hide:\n\n import numpy as np\n import scipy.special\n np.set_printoptions(suppress=True)\n\n\"\"\"\n\nfrom warnings import warn\nfrom copy import deepcopy\n\nimport numpy as np\nfrom sklearn.base import clone\nfrom sklearn.linear_model import (LassoCV, LinearRegression,\n LogisticRegressionCV)\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom .._ortho_learner import _OrthoLearner\nfrom .._cate_estimator import (DebiasedLassoCateEstimatorDiscreteMixin,\n ForestModelFinalCateEstimatorDiscreteMixin,\n StatsModelsCateEstimatorDiscreteMixin, LinearCateEstimator)\nfrom ..inference import GenericModelFinalInferenceDiscrete\nfrom ..grf import RegressionForest\nfrom ..sklearn_extensions.linear_model import (\n DebiasedLasso, StatsModelsLinearRegression, WeightedLassoCVWrapper)\nfrom ..utilities import (_deprecate_positional, check_high_dimensional,\n filter_none_kwargs, fit_with_groups, inverse_onehot, get_feature_names_or_default)\nfrom .._shap import _shap_explain_multitask_model_cate, _shap_explain_model_cate\n\n\nclass _ModelNuisance:\n def __init__(self, model_propensity, model_regression, min_propensity):\n self._model_propensity = model_propensity\n self._model_regression = model_regression\n self._min_propensity = min_propensity\n\n def _combine(self, X, W):\n return np.hstack([arr for arr in [X, W] if arr is not None])\n\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n if Y.ndim != 1 and (Y.ndim != 2 or Y.shape[1] != 1):\n raise ValueError(\"The outcome matrix must be of shape ({0}, ) or ({0}, 1), \"\n \"instead got {1}.\".format(len(X), Y.shape))\n if (X is None) and (W is None):\n raise AttributeError(\"At least one of X or W has to not be None!\")\n if np.any(np.all(T == 0, axis=0)) or (not np.any(np.all(T == 0, axis=1))):\n raise AttributeError(\"Provided crossfit folds contain training splits that \" +\n \"don't contain all treatments\")\n XW = self._combine(X, W)\n filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight)\n\n fit_with_groups(self._model_propensity, XW, inverse_onehot(T), groups=groups, **filtered_kwargs)\n fit_with_groups(self._model_regression, np.hstack([XW, T]), Y, groups=groups, **filtered_kwargs)\n return self\n\n def score(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n XW = self._combine(X, W)\n filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight)\n\n if hasattr(self._model_propensity, 'score'):\n propensity_score = self._model_propensity.score(XW, inverse_onehot(T), **filtered_kwargs)\n else:\n 
propensity_score = None\n if hasattr(self._model_regression, 'score'):\n regression_score = self._model_regression.score(np.hstack([XW, T]), Y, **filtered_kwargs)\n else:\n regression_score = None\n\n return propensity_score, regression_score\n\n def predict(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n XW = self._combine(X, W)\n propensities = np.maximum(self._model_propensity.predict_proba(XW), self._min_propensity)\n n = T.shape[0]\n Y_pred = np.zeros((T.shape[0], T.shape[1] + 1))\n T_counter = np.zeros(T.shape)\n Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)\n Y_pred[:, 0] += (Y.reshape(n) - Y_pred[:, 0]) * np.all(T == 0, axis=1) / propensities[:, 0]\n for t in np.arange(T.shape[1]):\n T_counter = np.zeros(T.shape)\n T_counter[:, t] = 1\n Y_pred[:, t + 1] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)\n Y_pred[:, t + 1] += (Y.reshape(n) - Y_pred[:, t + 1]) * (T[:, t] == 1) / propensities[:, t + 1]\n return Y_pred.reshape(Y.shape + (T.shape[1] + 1,))\n\n\nclass _ModelFinal:\n # Coding Remark: The reasoning around the multitask_model_final could have been simplified if\n # we simply wrapped the model_final with a MultiOutputRegressor. However, because we also want\n # to allow even for model_final objects whose fit(X, y) can accept X=None\n # (e.g. the StatsModelsLinearRegression), we cannot take that route, because the MultiOutputRegressor\n # checks that X is 2D array.\n def __init__(self, model_final, featurizer, multitask_model_final):\n self._model_final = clone(model_final, safe=False)\n self._featurizer = clone(featurizer, safe=False)\n self._multitask_model_final = multitask_model_final\n return\n\n def fit(self, Y, T, X=None, W=None, *, nuisances, sample_weight=None, sample_var=None):\n Y_pred, = nuisances\n self.d_y = Y_pred.shape[1:-1] # track whether there's a Y dimension (must be a singleton)\n self.d_t = Y_pred.shape[-1] - 1 # track # of treatment (exclude baseline treatment)\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.fit_transform(X)\n filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight, sample_var=sample_var)\n if self._multitask_model_final:\n ys = Y_pred[..., 1:] - Y_pred[..., [0]] # subtract control results from each other arm\n if self.d_y: # need to squeeze out singleton so that we fit on 2D array\n ys = ys.squeeze(1)\n self.model_cate = self._model_final.fit(X, ys, **filtered_kwargs)\n else:\n self.models_cate = [clone(self._model_final, safe=False).fit(X, Y_pred[..., t] - Y_pred[..., 0],\n **filtered_kwargs)\n for t in np.arange(1, Y_pred.shape[-1])]\n return self\n\n def predict(self, X=None):\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.transform(X)\n if self._multitask_model_final:\n pred = self.model_cate.predict(X).reshape((-1, self.d_t))\n if self.d_y: # need to reintroduce singleton Y dimension\n return pred[:, np.newaxis, :]\n return pred\n else:\n preds = np.array([mdl.predict(X).reshape((-1,) + self.d_y) for mdl in self.models_cate])\n return np.moveaxis(preds, 0, -1) # move treatment dim to end\n\n def score(self, Y, T, X=None, W=None, *, nuisances, sample_weight=None, sample_var=None):\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.transform(X)\n Y_pred, = nuisances\n if self._multitask_model_final:\n Y_pred_diff = Y_pred[..., 1:] - Y_pred[..., [0]]\n cate_pred = self.model_cate.predict(X).reshape((-1, self.d_t))\n if self.d_y:\n cate_pred = cate_pred[:, 
np.newaxis, :]\n return np.mean(np.average((Y_pred_diff - cate_pred)**2, weights=sample_weight, axis=0))\n\n else:\n scores = []\n for t in np.arange(1, Y_pred.shape[-1]):\n # since we only allow single dimensional y, we could flatten the prediction\n Y_pred_diff = (Y_pred[..., t] - Y_pred[..., 0]).flatten()\n cate_pred = self.models_cate[t - 1].predict(X).flatten()\n score = np.average((Y_pred_diff - cate_pred)**2, weights=sample_weight, axis=0)\n scores.append(score)\n return np.mean(scores)\n\n\nclass DRLearner(_OrthoLearner):\n \"\"\"\n CATE estimator that uses doubly-robust correction techniques to account for\n covariate shift (selection bias) between the treatment arms. The estimator is a special\n case of an :class:`._OrthoLearner` estimator, so it follows the two\n stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting\n manner and a final stage estimates the CATE model. See the documentation of\n :class:`._OrthoLearner` for a description of this two stage process.\n\n In this estimator, the CATE is estimated by using the following estimating equations. If we let:\n\n .. math ::\n Y_{i, t}^{DR} = E[Y | X_i, W_i, T_i=t]\\\n + \\\\frac{Y_i - E[Y | X_i, W_i, T_i=t]}{Pr[T_i=t | X_i, W_i]} \\\\cdot 1\\\\{T_i=t\\\\}\n\n Then the following estimating equation holds:\n\n .. math ::\n E\\\\left[Y_{i, t}^{DR} - Y_{i, 0}^{DR} | X_i\\\\right] = \\\\theta_t(X_i)\n\n Thus if we estimate the nuisance functions :math:`h(X, W, T) = E[Y | X, W, T]` and\n :math:`p_t(X, W)=Pr[T=t | X, W]` in the first stage, we can estimate the final stage cate for each\n treatment t, by running a regression, regressing :math:`Y_{i, t}^{DR} - Y_{i, 0}^{DR}` on :math:`X_i`.\n\n The problem of estimating the nuisance function :math:`p` is a simple multi-class classification\n problem of predicting the label :math:`T` from :math:`X, W`. The :class:`.DRLearner`\n class takes as input the parameter ``model_propensity``, which is an arbitrary scikit-learn\n classifier, that is internally used to solve this classification problem.\n\n The second nuisance function :math:`h` is a simple regression problem and the :class:`.DRLearner`\n class takes as input the parameter ``model_regressor``, which is an arbitrary scikit-learn regressor that\n is internally used to solve this regression problem.\n\n The final stage is multi-task regression problem with outcomes the labels :math:`Y_{i, t}^{DR} - Y_{i, 0}^{DR}`\n for each non-baseline treatment t. The :class:`.DRLearner` takes as input parameter\n ``model_final``, which is any scikit-learn regressor that is internally used to solve this multi-task\n regresion problem. If the parameter ``multitask_model_final`` is False, then this model is assumed\n to be a mono-task regressor, and separate clones of it are used to solve each regression target\n separately.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. 
Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n model_final :\n estimator for the final cate model. Trained on regressing the doubly robust potential outcomes\n on (features X).\n\n - If X is None, then the fit method of model_final should be able to handle X=None.\n - If featurizer is not None and X is not None, then it is trained on the outcome of\n featurizer.fit_transform(X).\n - If multitask_model_final is True, then this model must support multitasking\n and it is trained by regressing all doubly robust target outcomes on (featurized) features simultanteously.\n - The output of the predict(X) of the trained model will contain the CATEs for each treatment compared to\n baseline treatment (lexicographically smallest). If multitask_model_final is False, it is assumed to be a\n mono-task model and a separate clone of the model is trained for each outcome. Then predict(X) of the t-th\n clone will be the CATE of the t-th lexicographically ordered treatment compared to the baseline.\n\n multitask_model_final : bool, optional, default False\n Whether the model_final should be treated as a multi-task model. See description of model_final.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n cv: int, cross-validation generator or an iterable, optional (default is 2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all\n W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.\n\n mc_iters: int, optional (default=None)\n The number of times to rerun the first stage models to reduce the variance of the nuisances.\n\n mc_agg: {'mean', 'median'}, optional (default='mean')\n How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of\n cross-fitting.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. 
testcode::\n :hide:\n\n import numpy as np\n import scipy.special\n np.set_printoptions(suppress=True)\n\n .. testcode::\n\n from econml.dr import DRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n sigma = 0.001\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))\n est = DRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.const_marginal_effect(X[:2])\n array([[0.511640..., 1.144004...],\n [0.378140..., 0.613143...]])\n >>> est.effect(X[:2], T0=0, T1=1)\n array([0.511640..., 0.378140...])\n >>> est.score_\n 5.11238581...\n >>> est.score(y, T, X=X)\n 5.78673506...\n >>> est.model_cate(T=1).coef_\n array([0.434910..., 0.010226..., 0.047913...])\n >>> est.model_cate(T=2).coef_\n array([ 0.863723..., 0.086946..., -0.022288...])\n >>> est.cate_feature_names()\n ['X0', 'X1', 'X2']\n >>> [mdl.coef_ for mdls in est.models_regression for mdl in mdls]\n [array([ 1.472..., 0.001..., -0.011..., 0.698..., 2.049...]),\n array([ 1.455..., -0.002..., 0.005..., 0.677..., 1.998...])]\n >>> [mdl.coef_ for mdls in est.models_propensity for mdl in mdls]\n [array([[-0.747..., 0.153..., -0.018...],\n [ 0.083..., -0.110..., -0.076...],\n [ 0.663..., -0.043... , 0.094...]]),\n array([[-1.048..., 0.000..., 0.032...],\n [ 0.019..., 0.124..., -0.081...],\n [ 1.029..., -0.124..., 0.049...]])]\n\n Beyond default models:\n\n .. testcode::\n\n from sklearn.linear_model import LassoCV\n from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n from econml.dr import DRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n sigma = 0.01\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))\n est = DRLearner(model_propensity=RandomForestClassifier(n_estimators=100, min_samples_leaf=10),\n model_regression=RandomForestRegressor(n_estimators=100, min_samples_leaf=10),\n model_final=LassoCV(cv=3),\n featurizer=None)\n est.fit(y, T, X=X, W=None)\n\n >>> est.score_\n 1.7...\n >>> est.const_marginal_effect(X[:3])\n array([[0.68..., 1.10...],\n [0.56..., 0.79...],\n [0.34..., 0.10...]])\n >>> est.model_cate(T=2).coef_\n array([0.74..., 0. , 0. ])\n >>> est.model_cate(T=2).intercept_\n 1.9...\n >>> est.model_cate(T=1).coef_\n array([0.24..., 0.00..., 0. ])\n >>> est.model_cate(T=1).intercept_\n 0.94...\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. 
math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n\n \"\"\"\n\n def __init__(self, *,\n model_propensity='auto',\n model_regression='auto',\n model_final=StatsModelsLinearRegression(),\n multitask_model_final=False,\n featurizer=None,\n min_propensity=1e-6,\n categories='auto',\n cv=2,\n mc_iters=None,\n mc_agg='mean',\n random_state=None):\n self.model_propensity = clone(model_propensity, safe=False)\n self.model_regression = clone(model_regression, safe=False)\n self.model_final = clone(model_final, safe=False)\n self.multitask_model_final = multitask_model_final\n self.featurizer = clone(featurizer, safe=False)\n self.min_propensity = min_propensity\n super().__init__(cv=cv,\n mc_iters=mc_iters,\n mc_agg=mc_agg,\n discrete_treatment=True,\n discrete_instrument=False, # no instrument, so doesn't matter\n categories=categories,\n random_state=random_state)\n\n def _get_inference_options(self):\n options = super()._get_inference_options()\n if not self.multitask_model_final:\n options.update(auto=GenericModelFinalInferenceDiscrete)\n else:\n options.update(auto=lambda: None)\n return options\n\n def _gen_ortho_learner_model_nuisance(self):\n if self.model_propensity == 'auto':\n model_propensity = LogisticRegressionCV(cv=3, solver='lbfgs', multi_class='auto',\n random_state=self.random_state)\n else:\n model_propensity = clone(self.model_propensity, safe=False)\n\n if self.model_regression == 'auto':\n model_regression = WeightedLassoCVWrapper(cv=3, random_state=self.random_state)\n else:\n model_regression = clone(self.model_regression, safe=False)\n\n return _ModelNuisance(model_propensity, model_regression, self.min_propensity)\n\n def _gen_featurizer(self):\n return clone(self.featurizer, safe=False)\n\n def _gen_model_final(self):\n return clone(self.model_final, safe=False)\n\n def _gen_ortho_learner_model_final(self):\n return _ModelFinal(self._gen_model_final(), self._gen_featurizer(), self.multitask_model_final)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None,\n cache_values=False, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each sample\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the `cv` argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n cache_values: bool, default False\n Whether to cache inputs and first stage results, which will allow refitting a different final model\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. 
This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from _OrthoLearner, to enforce Z=None and improve the docstring\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=sample_var, groups=groups,\n cache_values=cache_values, inference=inference)\n\n def refit_final(self, *, inference='auto'):\n return super().refit_final(inference=inference)\n refit_final.__doc__ = _OrthoLearner.refit_final.__doc__\n\n def score(self, Y, T, X=None, W=None):\n \"\"\"\n Score the fitted CATE model on a new data set. Generates nuisance parameters\n for the new data set based on the fitted nuisance models created at fit time.\n It uses the mean prediction of the models fitted by the different crossfit folds.\n Then calculates the MSE of the final CATE model on the doubly robust potential outcome differences.\n\n If model_final does not have a score method, then it raises an :exc:`.AttributeError`\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n\n Returns\n -------\n score: float\n The MSE of the final CATE model on the new data.\n \"\"\"\n # Replacing score from _OrthoLearner, to enforce Z=None and improve the docstring\n return super().score(Y, T, X=X, W=W)\n\n @property\n def multitask_model_cate(self):\n \"\"\"\n Get the fitted final CATE model.\n\n Returns\n -------\n multitask_model_cate: object of type(`model_final`)\n An instance of the model_final object that was fitted after calling fit, whose\n vector of outcomes corresponds to the CATE models for each treatment, compared to baseline.\n Available only when multitask_model_final=True.\n \"\"\"\n if not self.ortho_learner_model_final_._multitask_model_final:\n raise AttributeError(\"Separate CATE models were fitted for each treatment! Use model_cate.\")\n return self.ortho_learner_model_final_.model_cate\n\n def model_cate(self, T=1):\n \"\"\"\n Get the fitted final CATE model.\n\n Parameters\n ----------\n T: alphanumeric\n The treatment with respect to which we want the fitted CATE model.\n\n Returns\n -------\n model_cate: object of type(model_final)\n An instance of the model_final object that was fitted after calling fit which corresponds\n to the CATE model for treatment T=t, compared to baseline. Available when multitask_model_final=False.\n \"\"\"\n if self.ortho_learner_model_final_._multitask_model_final:\n raise AttributeError(\"A single multitask model was fitted for all treatments! Use multitask_model_cate.\")\n _, T = self._expand_treatments(None, T)\n ind = inverse_onehot(T).item() - 1\n assert ind >= 0, \"No model was fitted for the control\"\n return self.ortho_learner_model_final_.models_cate[ind]\n\n @property\n def models_propensity(self):\n \"\"\"\n Get the fitted propensity models.\n\n Returns\n -------\n models_propensity: nested list of objects of type(`model_propensity`)\n A nested list of instances of the `model_propensity` object. 
The number of sublists equals the number of\n monte carlo iterations, each element in the sublist corresponds to a crossfitting\n fold and is the model instance that was fitted for that training fold.\n \"\"\"\n return [[mdl._model_propensity for mdl in mdls] for mdls in super().models_nuisance_]\n\n @property\n def models_regression(self):\n \"\"\"\n Get the fitted regression models.\n\n Returns\n -------\n model_regression: nested list of objects of type(`model_regression`)\n A nested list of instances of the model_regression object. The number of sublists equals the number of\n monte carlo iterations, each element in the sublist corresponds to a crossfitting\n fold and is the model instance that was fitted for that training fold.\n \"\"\"\n return [[mdl._model_regression for mdl in mdls] for mdls in super().models_nuisance_]\n\n @property\n def nuisance_scores_propensity(self):\n \"\"\"Gets the score for the propensity model on out-of-sample training data\"\"\"\n return self.nuisance_scores_[0]\n\n @property\n def nuisance_scores_regression(self):\n \"\"\"Gets the score for the regression model on out-of-sample training data\"\"\"\n return self.nuisance_scores_[1]\n\n @property\n def featurizer_(self):\n \"\"\"\n Get the fitted featurizer.\n\n Returns\n -------\n featurizer: object of type(`featurizer`)\n An instance of the fitted featurizer that was used to preprocess X in the final CATE model training.\n Available only when featurizer is not None and X is not None.\n \"\"\"\n return self.ortho_learner_model_final_._featurizer\n\n def cate_feature_names(self, feature_names=None):\n \"\"\"\n Get the output feature names.\n\n Parameters\n ----------\n feature_names: list of strings of length X.shape[1] or None\n The names of the input features. If None and X is a dataframe, it defaults to the column names\n from the dataframe.\n\n Returns\n -------\n out_feature_names: list of strings or None\n The names of the output features :math:`\\\\phi(X)`, i.e. the features with respect to which the\n final CATE model for each treatment is linear. These are the names of the features that are associated\n with each entry of the :meth:`coef_` parameter. Available only when the featurizer is not None and has\n a method: `get_feature_names(feature_names)`. 
Otherwise None is returned.\n \"\"\"\n if self._d_x is None:\n # Handles the corner case when X=None but featurizer might be not None\n return None\n if feature_names is None:\n feature_names = self._input_names[\"feature_names\"]\n if self.featurizer_ is None:\n return feature_names\n return get_feature_names_or_default(self.featurizer_, feature_names)\n\n @property\n def model_final_(self):\n return self.ortho_learner_model_final_._model_final\n\n @property\n def fitted_models_final(self):\n return self.ortho_learner_model_final_.models_cate\n\n def shap_values(self, X, *, feature_names=None, treatment_names=None, output_names=None, background_samples=100):\n if self.ortho_learner_model_final_._multitask_model_final:\n return _shap_explain_multitask_model_cate(self.const_marginal_effect, self.multitask_model_cate, X,\n self._d_t, self._d_y,\n featurizer=self.featurizer_,\n feature_names=feature_names,\n treatment_names=treatment_names,\n output_names=output_names,\n input_names=self._input_names,\n background_samples=background_samples)\n else:\n return _shap_explain_model_cate(self.const_marginal_effect, self.fitted_models_final,\n X, self._d_t, self._d_y,\n featurizer=self.featurizer_,\n feature_names=feature_names,\n treatment_names=treatment_names,\n output_names=output_names,\n input_names=self._input_names,\n background_samples=background_samples)\n shap_values.__doc__ = LinearCateEstimator.shap_values.__doc__\n\n\nclass LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner):\n \"\"\"\n Special case of the :class:`.DRLearner` where the final stage\n is a Linear Regression on a low dimensional set of features. In this case, inference\n can be performed via the asymptotic normal characterization of the estimated parameters.\n This is computationally faster than bootstrap inference. To do this, just leave the setting ``inference='auto'``\n unchanged, or explicitly set ``inference='statsmodels'`` or alter the covariance type calculation via\n ``inference=StatsModelsInferenceDiscrete(cov_type='HC1')``.\n\n More concretely, this estimator assumes that the final CATE model for each treatment takes a linear form:\n\n .. math ::\n \\\\theta_t(X) = \\\\left\\\\langle \\\\theta_t, \\\\phi(X) \\\\right\\\\rangle + \\\\beta_t\n\n where :math:`\\\\phi(X)` is the output features of the featurizer, or `X` if featurizer is None. :math:`\\\\beta_t`\n is an intercept of the CATE, which is included if ``fit_cate_intercept=True`` (Default). It fits this by\n running a standard ordinary least squares (OLS) regression, regressing the doubly robust outcome differences on X:\n\n .. math ::\n \\\\min_{\\\\theta_t, \\\\beta_t}\\\n E_n\\\\left[\\\\left(Y_{i, t}^{DR} - Y_{i, 0}^{DR}\\\n - \\\\left\\\\langle \\\\theta_t, \\\\phi(X_i) \\\\right\\\\rangle - \\\\beta_t\\\\right)^2\\\\right]\n\n Then inference can be performed via standard approaches for inference of OLS, via asymptotic normal approximations\n of the estimated parameters. The default covariance estimator used is heteroskedasticity robust (HC1).\n For other methods see :class:`.StatsModelsInferenceDiscrete`. You can invoke them by setting:\n ``inference=StatsModelsInferenceDiscrete(cov_type=...)``.\n\n This approach is valid even if the CATE model is not linear in :math:`\\\\phi(X)`. In this case it performs\n inference on the best linear approximation of the CATE model.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. 
Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n fit_cate_intercept : bool, optional, default True\n Whether the linear CATE model should have a constant term.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n cv: int, cross-validation generator or an iterable, optional (default is 2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(X,T)` to generate the splits.\n\n mc_iters: int, optional (default=None)\n The number of times to rerun the first stage models to reduce the variance of the nuisances.\n\n mc_agg: {'mean', 'median'}, optional (default='mean')\n How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of\n cross-fitting.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. testcode::\n :hide:\n\n import numpy as np\n import scipy.special\n np.set_printoptions(suppress=True)\n\n .. 
testcode::\n\n from econml.dr import DRLearner, LinearDRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(size=(1000,))\n est = LinearDRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.effect(X[:3])\n array([ 0.409743..., 0.312604..., -0.127394...])\n >>> est.effect_interval(X[:3])\n (array([ 0.120682..., -0.102543..., -0.663246...]), array([0.698803..., 0.727753..., 0.408458...]))\n >>> est.coef_(T=1)\n array([ 0.450779..., -0.003214... , 0.063884... ])\n >>> est.coef__interval(T=1)\n (array([ 0.202646..., -0.207195..., -0.104558...]), array([0.698911..., 0.200767..., 0.232326...]))\n >>> est.intercept_(T=1)\n 0.88425066...\n >>> est.intercept__interval(T=1)\n (0.68655813..., 1.08194320...)\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n \"\"\"\n\n def __init__(self, *,\n model_propensity='auto',\n model_regression='auto',\n featurizer=None,\n fit_cate_intercept=True,\n min_propensity=1e-6,\n categories='auto',\n cv=2,\n mc_iters=None,\n mc_agg='mean',\n random_state=None):\n self.fit_cate_intercept = fit_cate_intercept\n super().__init__(model_propensity=model_propensity,\n model_regression=model_regression,\n model_final=None,\n featurizer=featurizer,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n cv=cv,\n mc_iters=mc_iters,\n mc_agg=mc_agg,\n random_state=random_state)\n\n def _gen_model_final(self):\n return StatsModelsLinearRegression(fit_intercept=self.fit_cate_intercept)\n\n def _gen_ortho_learner_model_final(self):\n return _ModelFinal(self._gen_model_final(), self._gen_featurizer(), False)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None,\n cache_values=False, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each sample\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the `cv` argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n cache_values: bool, default False\n Whether to cache inputs and first stage results, which will allow refitting a different final model\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. 
This estimator supports ``'bootstrap'``\n (or an instance of :class:`.BootstrapInference`) and ``'statsmodels'``\n (or an instance of :class:`.StatsModelsInferenceDiscrete`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from DRLearner, to add statsmodels inference in docstring\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=sample_var, groups=groups,\n cache_values=cache_values, inference=inference)\n\n @property\n def fit_cate_intercept_(self):\n return self.model_final_.fit_intercept\n\n @property\n def multitask_model_cate(self):\n # Replacing this method which is invalid for this class, so that we make the\n # docstring empty and not appear in the docs.\n return super().multitask_model_cate\n\n @property\n def multitask_model_final(self):\n return False\n\n @multitask_model_final.setter\n def multitask_model_final(self, value):\n if value:\n raise ValueError(\"Parameter `multitask_model_final` cannot change from `False` for this estimator!\")\n\n @property\n def model_final(self):\n return self._gen_model_final()\n\n @model_final.setter\n def model_final(self, model):\n if model is not None:\n raise ValueError(\"Parameter `model_final` cannot be altered for this estimator!\")\n\n\nclass SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner):\n \"\"\"\n Special case of the :class:`.DRLearner` where the final stage\n is a Debiased Lasso Regression. In this case, inference can be performed via the debiased lasso approach\n and its asymptotic normal characterization of the estimated parameters. This is computationally\n faster than bootstrap inference. Leave the default ``inference='auto'`` unchanged, or explicitly set\n ``inference='debiasedlasso'`` at fit time to enable inference via asymptotic normality.\n\n More concretely, this estimator assumes that the final CATE model for each treatment takes a linear form:\n\n .. math ::\n \\\\theta_t(X) = \\\\left\\\\langle \\\\theta_t, \\\\phi(X) \\\\right\\\\rangle + \\\\beta_t\n\n where :math:`\\\\phi(X)` is the output features of the featurizer, or `X` if featurizer is None. :math:`\\\\beta_t`\n is an intercept of the CATE, which is included if ``fit_cate_intercept=True`` (Default). It fits this by\n running a debiased lasso regression (i.e. :math:`\\\\ell_1`-penalized regression with debiasing),\n regressing the doubly robust outcome differences on X: i.e. first solves the penalized square loss problem\n\n .. math ::\n \\\\min_{\\\\theta_t, \\\\beta_t}\\\n E_n\\\\left[\\\\left(Y_{i, t}^{DR} - Y_{i, 0}^{DR}\\\n - \\\\left\\\\langle \\\\theta_t, \\\\phi(X_i) \\\\right\\\\rangle - \\\\beta_t\\\\right)^2\\\\right]\\\n + \\\\lambda \\\\left\\\\lVert \\\\theta_t \\\\right\\\\rVert_1\n\n and then adds a debiasing correction to the solution. If alpha='auto' (recommended), then the penalty\n weight :math:`\\\\lambda` is set optimally via cross-validation.\n\n This approach is valid even if the CATE model is not linear in :math:`\\\\phi(X)`. In this case it performs\n inference on the best sparse linear approximation of the CATE model.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. 
The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n fit_cate_intercept : bool, optional, default True\n Whether the linear CATE model should have a constant term.\n\n alpha : string | float, optional, default 'auto'\n CATE L1 regularization applied through the debiased lasso in the final model.\n 'auto' corresponds to a CV form of the :class:`DebiasedLasso`.\n\n n_alphas : int, optional, default 100\n How many alphas to try if alpha='auto'\n\n alpha_cov : string | float, optional, default 'auto'\n The regularization alpha that is used when constructing the pseudo inverse of\n the covariance matrix Theta used for correcting the final stage lasso coefficient\n in the debiased lasso. Each such regression corresponds to the regression of one feature\n on the remainder of the features.\n\n n_alphas_cov : int, optional, default 10\n How many alpha_cov to try if alpha_cov='auto'.\n\n max_iter : int, optional, default 1000\n The maximum number of iterations in the Debiased Lasso\n\n tol : float, optional, default 1e-4\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n n_jobs : int or None, optional (default=None)\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``None`` means 1 unless in a :func:`joblib.parallel_backend` context.\n ``-1`` means using all processors.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n cv: int, cross-validation generator or an iterable, optional, default 2\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(X,T)` to generate the splits.\n\n mc_iters: int, optional (default=None)\n The number of times to rerun the first stage models to reduce the variance of the nuisances.\n\n mc_agg: {'mean', 
'median'}, optional (default='mean')\n How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of\n cross-fitting.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. testcode::\n :hide:\n\n import numpy as np\n import scipy.special\n np.set_printoptions(suppress=True)\n\n .. testcode::\n\n from econml.dr import DRLearner, SparseLinearDRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(size=(1000,))\n est = SparseLinearDRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.effect(X[:3])\n array([ 0.41..., 0.31..., -0.12...])\n >>> est.effect_interval(X[:3])\n (array([ 0.04..., -0.19..., -0.73...]), array([0.77..., 0.82..., 0.47...]))\n >>> est.coef_(T=1)\n array([ 0.45..., -0.00..., 0.06...])\n >>> est.coef__interval(T=1)\n (array([ 0.24... , -0.19..., -0.13...]), array([0.65..., 0.19..., 0.26...]))\n >>> est.intercept_(T=1)\n 0.88...\n >>> est.intercept__interval(T=1)\n (0.68..., 1.08...)\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n \"\"\"\n\n def __init__(self, *,\n model_propensity='auto',\n model_regression='auto',\n featurizer=None,\n fit_cate_intercept=True,\n alpha='auto',\n n_alphas=100,\n alpha_cov='auto',\n n_alphas_cov=10,\n max_iter=1000,\n tol=1e-4,\n n_jobs=None,\n min_propensity=1e-6,\n categories='auto',\n cv=2,\n mc_iters=None,\n mc_agg='mean',\n random_state=None):\n self.fit_cate_intercept = fit_cate_intercept\n self.alpha = alpha\n self.n_alphas = n_alphas\n self.alpha_cov = alpha_cov\n self.n_alphas_cov = n_alphas_cov\n self.max_iter = max_iter\n self.tol = tol\n self.n_jobs = n_jobs\n super().__init__(model_propensity=model_propensity,\n model_regression=model_regression,\n model_final=None,\n featurizer=featurizer,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n cv=cv,\n mc_iters=mc_iters,\n mc_agg=mc_agg,\n random_state=random_state)\n\n def _gen_model_final(self):\n return DebiasedLasso(alpha=self.alpha,\n n_alphas=self.n_alphas,\n alpha_cov=self.alpha_cov,\n n_alphas_cov=self.n_alphas_cov,\n fit_intercept=self.fit_cate_intercept,\n max_iter=self.max_iter,\n tol=self.tol,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n def _gen_ortho_learner_model_final(self):\n return _ModelFinal(self._gen_model_final(), self._gen_featurizer(), False)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. 
In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None,\n cache_values=False, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each sample\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the `cv` argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n cache_values: bool, default False\n Whether to cache inputs and first stage results, which will allow refitting a different final model\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports ``'bootstrap'``\n (or an instance of :class:`.BootstrapInference`) and ``'debiasedlasso'``\n (or an instance of :class:`.LinearModelInferenceDiscrete`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from DRLearner, to add debiasedlasso inference in docstring\n # TODO: support sample_var\n if sample_var is not None and inference is not None:\n warn(\"This estimator does not yet support sample variances and inference does not take \"\n \"sample variances into account. This feature will be supported in a future release.\")\n check_high_dimensional(X, T, threshold=5, featurizer=self.featurizer,\n discrete_treatment=self.discrete_treatment,\n msg=\"The number of features in the final model (< 5) is too small for a sparse model. \"\n \"We recommend using the LinearDRLearner for this low-dimensional setting.\")\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=None, groups=groups,\n cache_values=cache_values, inference=inference)\n\n @property\n def fit_cate_intercept_(self):\n return self.model_final_.fit_intercept\n\n @property\n def multitask_model_final(self):\n return False\n\n @multitask_model_final.setter\n def multitask_model_final(self, value):\n if value:\n raise ValueError(\"Parameter `multitask_model_final` cannot change from `False` for this estimator!\")\n\n @property\n def model_final(self):\n return self._gen_model_final()\n\n @model_final.setter\n def model_final(self, model):\n if model is not None:\n raise ValueError(\"Parameter `model_final` cannot be altered for this estimator!\")\n\n\nclass ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner):\n \"\"\" Instance of DRLearner with a :class:`~econml.grf.RegressionForest`\n as a final model, so as to enable non-parametric inference.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n\n model_regression : scikit-learn regressor\n Estimator for E[Y | X, W, T]. 
Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`~econml.utilities.MultiModelWrapper` helper class.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n cv: int, cross-validation generator or an iterable, optional (Default=2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all\n W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.\n\n mc_iters: int, optional (default=None)\n The number of times to rerun the first stage models to reduce the variance of the nuisances.\n\n mc_agg: {'mean', 'median'}, optional (default='mean')\n How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of\n cross-fitting.\n\n n_estimators : integer, optional (default=1000)\n The total number of trees in the forest. The forest consists of\n sqrt(n_estimators) sub-forests, where each sub-forest\n contains sqrt(n_estimators) trees.\n\n max_depth : integer or None, optional (default=None)\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int, float, optional (default=5)\n The minimum number of splitting samples required to split an internal node.\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n min_samples_leaf : int, float, optional (default=5)\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` splitting samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression. After construction the tree is also pruned\n so that there are at least min_samples_leaf estimation samples on\n each leaf.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the sum total of weights (of all\n splitting samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided. 
After construction\n the tree is pruned so that the fraction of the sum total weight\n of the estimation samples contained in each leaf node is at\n least min_weight_fraction_leaf\n\n max_features : int, float, string or None, optional (default=\"auto\")\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n min_impurity_decrease : float, optional (default=0.)\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of split samples, ``N_t`` is the number of\n split samples at the current node, ``N_t_L`` is the number of split samples in the\n left child, and ``N_t_R`` is the number of split samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n max_samples : int or float in (0, .5], default=.45,\n The number of samples to use for each subsample that is used to train each tree:\n\n - If int, then train each tree on `max_samples` samples, sampled without replacement from all the samples\n - If float, then train each tree on ceil(`max_samples` * `n_samples`), sampled without replacement\n from all the samples.\n\n min_balancedness_tol: float in [0, .5], default=.45\n How imbalanced a split we can tolerate. This enforces that each split leaves at least\n (.5 - min_balancedness_tol) fraction of samples on each side of the split; or fraction\n of the total weight of samples, when sample_weight is not None. Default value, ensures\n that at least 5% of the parent node weight falls in each side of the split. Set it to 0.0 for no\n balancedness and to .5 for perfectly balanced splits. For the formal inference theory\n to be valid, this has to be any positive constant bounded away from zero.\n\n honest : boolean, optional (default=True)\n Whether to use honest trees, i.e. half of the samples are used for\n creating the tree structure and the other half for the estimation at\n the leafs. If False, then all samples are used for both parts.\n\n subforest_size : int, default=4,\n The number of trees in each sub-forest that is used in the bootstrap-of-little-bags calculation.\n The parameter `n_estimators` must be divisible by `subforest_size`. Should typically be a small constant.\n\n n_jobs : int or None, optional (default=-1)\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``None`` means 1 unless in a :func:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n verbose : int, optional (default=0)\n Controls the verbosity when fitting and predicting.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n \"\"\"\n\n def __init__(self, *,\n model_regression=\"auto\",\n model_propensity=\"auto\",\n featurizer=None,\n min_propensity=1e-6,\n categories='auto',\n cv=2,\n mc_iters=None,\n mc_agg='mean',\n n_estimators=1000,\n max_depth=None,\n min_samples_split=5,\n min_samples_leaf=5,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n min_impurity_decrease=0.,\n max_samples=.45,\n min_balancedness_tol=.45,\n honest=True,\n subforest_size=4,\n n_jobs=-1,\n verbose=0,\n random_state=None):\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.min_impurity_decrease = min_impurity_decrease\n self.max_samples = max_samples\n self.min_balancedness_tol = min_balancedness_tol\n self.honest = honest\n self.subforest_size = subforest_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n super().__init__(model_regression=model_regression,\n model_propensity=model_propensity,\n model_final=None,\n featurizer=featurizer,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n cv=cv,\n mc_iters=mc_iters,\n mc_agg=mc_agg,\n random_state=random_state)\n\n def _gen_model_final(self):\n return RegressionForest(n_estimators=self.n_estimators,\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n max_features=self.max_features,\n min_impurity_decrease=self.min_impurity_decrease,\n max_samples=self.max_samples,\n min_balancedness_tol=self.min_balancedness_tol,\n honest=self.honest,\n inference=True,\n subforest_size=self.subforest_size,\n n_jobs=self.n_jobs,\n random_state=self.random_state,\n verbose=self.verbose,\n warm_start=False)\n\n def _gen_ortho_learner_model_final(self):\n return _ModelFinal(self._gen_model_final(), self._gen_featurizer(), False)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None,\n cache_values=False, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates functions τ(·,·,·), ∂τ(·,·).\n\n Parameters\n ----------\n Y: (n × d_y) matrix or vector of length n\n Outcomes for each sample\n T: (n × dₜ) matrix or vector of length n\n Treatments for each sample\n X: optional (n × dₓ) matrix\n Features for each sample\n W: optional (n × d_w) matrix\n Controls for each sample\n sample_weight: optional (n,) vector\n Weights for each row\n sample_var: optional (n, n_y) vector\n Variance of sample, in case it corresponds to summary of many samples. 
Currently\n not in use by this method (as inference method does not require sample variance info).\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the `cv` argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n cache_values: bool, default False\n Whether to cache inputs and first stage results, which will allow refitting a different final model\n inference: string, `Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`) and 'blb'\n (for Bootstrap-of-Little-Bags based inference).\n\n Returns\n -------\n self\n \"\"\"\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=None, groups=groups,\n cache_values=cache_values, inference=inference)\n\n @property\n def multitask_model_cate(self):\n # Replacing to remove docstring; the parent defines this as a property,\n # so shadow it as a property and forward to it\n return super().multitask_model_cate\n\n @property\n def multitask_model_final(self):\n return False\n\n @multitask_model_final.setter\n def multitask_model_final(self, value):\n if value:\n raise ValueError(\"Parameter `multitask_model_final` cannot change from `False` for this estimator!\")\n\n @property\n def model_final(self):\n return self._gen_model_final()\n\n @model_final.setter\n def model_final(self, model):\n if model is not None:\n raise ValueError(\"Parameter `model_final` cannot be altered for this estimator!\")\n"
] | [
[
"numpy.zeros",
"numpy.average",
"numpy.mean",
"numpy.moveaxis",
"numpy.arange",
"numpy.all",
"numpy.hstack",
"sklearn.base.clone",
"sklearn.linear_model.LogisticRegressionCV"
]
] |
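The DRLearner record above documents the doubly robust pseudo-outcome Y_{i,t}^{DR} and a final stage that regresses Y_{i,t}^{DR} - Y_{i,0}^{DR} on X. Below is a minimal, self-contained sketch of that construction for a binary treatment; it is not the econml implementation (econml cross-fits the nuisances over the `cv` folds, whereas this sketch fits them in-sample), and the nuisance model choices only loosely mirror the `model_propensity`/`model_regression`/`min_propensity` defaults:

    # Sketch of the doubly robust pseudo-outcome construction described in the
    # DRLearner docstring above; NOT the econml implementation (no cross-fitting).
    import numpy as np
    from sklearn.linear_model import LinearRegression, LogisticRegression

    rng = np.random.default_rng(0)
    n = 2000
    X = rng.normal(size=(n, 3))
    T = rng.binomial(1, 1 / (1 + np.exp(-X[:, 0])))             # binary treatment
    y = (1 + 0.5 * X[:, 0]) * T + X[:, 0] + rng.normal(size=n)  # true CATE = 1 + .5*x0

    # First-stage nuisances: h(X, t) ~ E[Y | X, T=t] and p_t(X) ~ Pr[T=t | X]
    h = [LinearRegression().fit(X[T == t], y[T == t]) for t in (0, 1)]
    prop = np.clip(LogisticRegression().fit(X, T).predict_proba(X), 1e-6, None)

    # Y^{DR}_{i,t} = h(X_i, t) + 1{T_i=t} * (Y_i - h(X_i, t)) / p_t(X_i)
    Y_dr = np.stack([h[t].predict(X) + (T == t) * (y - h[t].predict(X)) / prop[:, t]
                     for t in (0, 1)], axis=1)

    # Final stage: regress the pseudo-outcome difference on X to get theta_1(X)
    cate = LinearRegression().fit(X, Y_dr[:, 1] - Y_dr[:, 0])
    print(cate.intercept_, cate.coef_)   # roughly 1 and [0.5, 0, 0]

The in-sample nuisance fits keep the sketch short; the cross-fitting described in the record (plus the `mc_iters` averaging) is what preserves the orthogonality that makes the second-stage regression well behaved.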
cuichuan123456/transform-culane | [
"5e1de763100ae53b1c5c66cf168c09999cf75139"
] | [
"db/base.py"
] | [
"import os\nimport h5py\nimport numpy as np\n\nfrom config import system_configs\n\nclass BASE(object):\n def __init__(self):\n self._split = None\n self._db_inds = []\n self._image_ids = []\n\n self._data = None\n self._image_hdf5 = None\n self._image_file = None\n self._image_hdf5_file = None\n\n self._mean = np.zeros((3, ), dtype=np.float32)\n self._std = np.ones((3, ), dtype=np.float32)\n self._eig_val = np.ones((3, ), dtype=np.float32)\n self._eig_vec = np.zeros((3, 3), dtype=np.float32)\n\n self._configs = {}\n self._configs[\"data_aug\"] = True\n\n self._data_rng = None\n\n @property\n def data(self):\n if self._data is None:\n raise ValueError(\"data is not set\")\n return self._data\n\n @property\n def configs(self):\n return self._configs\n\n @property\n def mean(self):\n return self._mean\n\n @property\n def std(self):\n return self._std\n\n @property\n def eig_val(self):\n return self._eig_val\n\n @property\n def eig_vec(self):\n return self._eig_vec\n\n @property\n def db_inds(self):\n return self._db_inds\n\n @property\n def split(self):\n return self._split\n\n def update_config(self, new):\n for key in new:\n if key in self._configs:\n self._configs[key] = new[key]\n\n def image_ids(self, ind):\n return self._image_ids[ind]\n\n def image_file(self, ind):\n\n if self._image_file is None:\n raise ValueError(\"Image path is not initialized\")\n\n image_id = self._image_ids[ind]\n\n if type(self._image_file) == list:\n return self._image_file[ind]\n else:\n return self._image_file.format(image_id)\n\n def write_result(self, ind, all_bboxes, all_scores):\n pass\n\n def evaluate(self, *args):\n pass\n\n def shuffle_inds(self, quiet=False):\n if self._data_rng is None:\n self._data_rng = np.random.RandomState(os.getpid())\n\n if not quiet:\n print(\"shuffling indices...\")\n rand_perm = self._data_rng.permutation(len(self._db_inds))\n self._db_inds = self._db_inds[rand_perm]\n"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
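In the db/base.py record above, `_image_file` may be either a list of explicit paths or a format string keyed by image id, and `shuffle_inds` permutes `_db_inds` with a per-process RandomState. A hypothetical minimal subclass would populate those fields as below; the split name, ids, and path template are illustrative assumptions, and the snippet presumes the repo's `db.base` and `config` modules are importable:

    import numpy as np
    from db.base import BASE   # the class defined in the record above

    class ToyDB(BASE):
        """Hypothetical subclass showing how BASE's fields get populated."""
        def __init__(self):
            super(ToyDB, self).__init__()
            self._split = "train"
            self._image_ids = ["0001", "0002", "0003"]
            self._db_inds = np.arange(len(self._image_ids))
            # Format-string variant: image_file(ind) fills in the image id.
            self._image_file = "images/{}.jpg"

    db = ToyDB()
    print(db.image_file(1))   # -> images/0002.jpg
    db.shuffle_inds()         # permutes _db_inds with a per-process RandomState
    print(db.db_inds)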
yxiong/tensorflow | [
"f71cc62282bf2e066f9ebd08cf3f605fc98c6e41"
] | [
"tensorflow/python/ops/math_grad.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Gradients for operators defined in math_ops.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef _safe_shape_div(x, y):\n \"\"\"Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.\"\"\"\n return x // math_ops.maximum(y, 1)\n\n\[email protected](\"Sum\")\ndef _SumGrad(op, grad):\n \"\"\"Gradient for Sum.\"\"\"\n # Fast path for when reducing to a scalar and ndims is known: adds only\n # Reshape and Tile ops (and possibly a Shape).\n if (op.inputs[0].get_shape().ndims is not None and op.inputs[1].op.type ==\n \"Const\"):\n rank = op.inputs[0].get_shape().ndims\n axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr(\"value\"))\n if np.array_equal(axes, np.arange(rank)): # Reduce all dims.\n grad = array_ops.reshape(grad, [1] * rank)\n # If shape is not fully defined (but rank is), we use Shape.\n if op.inputs[0].get_shape().is_fully_defined():\n input_shape = op.inputs[0].get_shape().as_list()\n else:\n input_shape = array_ops.shape(op.inputs[0])\n return [array_ops.tile(grad, input_shape), None]\n\n input_shape = array_ops.shape(op.inputs[0])\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.tile(grad, tile_scaling), None]\n\n\ndef _MinOrMaxGrad(op, grad):\n \"\"\"Gradient for Min or Max. Amazingly it's precisely the same code.\"\"\"\n input_shape = array_ops.shape(op.inputs[0])\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n y = op.outputs[0]\n y = array_ops.reshape(y, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n\n # Compute the number of selected (maximum or minimum) elements in each\n # reduction dimension. 
If there are multiple minimum or maximum elements\n # then the gradient will be divided between them.\n indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)\n num_selected = array_ops.reshape(\n math_ops.reduce_sum(indicators, op.inputs[1]),\n output_shape_kept_dims)\n\n return [math_ops.div(indicators, num_selected) * grad, None]\n\n\[email protected](\"Max\")\ndef _MaxGrad(op, grad):\n \"\"\"Gradient for Max.\"\"\"\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Min\")\ndef _MinGrad(op, grad):\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Mean\")\ndef _MeanGrad(op, grad):\n \"\"\"Gradient for Mean.\"\"\"\n sum_grad = _SumGrad(op, grad)[0]\n input_shape = array_ops.shape(op.inputs[0])\n output_shape = array_ops.shape(op.outputs[0])\n factor = _safe_shape_div(math_ops.reduce_prod(input_shape),\n math_ops.reduce_prod(output_shape))\n return sum_grad / math_ops.cast(factor, sum_grad.dtype), None\n\n\[email protected](\"Prod\")\ndef _ProdGrad(op, grad):\n \"\"\"Gradient for Prod.\"\"\"\n # The gradient can be expressed by dividing the product by each entry of the\n # input tensor, but this approach can't deal with zeros in the input.\n # Here, we avoid this problem by composing the output as a product of two\n # cumprod operations.\n\n input_shape = array_ops.shape(op.inputs[0])\n # Reshape reduction indices for the case where the parameter is a scalar\n reduction_indices = array_ops.reshape(op.inputs[1], [-1])\n\n # Expand grad to full input shape\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n grad = array_ops.tile(grad, tile_scaling)\n\n # Pack all reduced dimensions into a single one, so we can perform the\n # cumprod ops. If the reduction dims list is empty, it defaults to float32,\n # so we need to cast here. 
We put all the shape-related ops on CPU to avoid\n # copying back and forth, and since listdiff is CPU only.\n with ops.device(\"/cpu:0\"):\n reduced = math_ops.cast(reduction_indices, dtypes.int32)\n idx = math_ops.range(0, array_ops.rank(op.inputs[0]))\n other, _ = array_ops.listdiff(idx, reduced)\n perm = array_ops.concat(0, [reduced, other])\n reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))\n other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))\n permuted = array_ops.transpose(op.inputs[0], perm)\n permuted_shape = array_ops.shape(permuted)\n reshaped = array_ops.reshape(permuted, (reduced_num, other_num))\n\n # Calculate product, leaving out the current entry\n left = math_ops.cumprod(reshaped, axis=0, exclusive=True)\n right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)\n y = array_ops.reshape(left * right, permuted_shape)\n\n # Invert the transpose and reshape operations.\n # Make sure to set the statically known shape information through a reshape.\n out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))\n return array_ops.reshape(out, input_shape), None\n\n\[email protected](\"SegmentSum\")\ndef _SegmentSumGrad(op, grad):\n \"\"\"Gradient for SegmentSum.\"\"\"\n return array_ops.gather(grad, op.inputs[1]), None\n\n\[email protected](\"SegmentMean\")\ndef _SegmentMeanGrad(op, grad):\n \"\"\"Gradient for SegmentMean.\"\"\"\n input_rank = array_ops.rank(op.inputs[0])\n ones_shape = array_ops.concat(\n 0, [array_ops.shape(op.inputs[1]),\n array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)])\n ones = array_ops.fill(ones_shape,\n constant_op.constant(1, dtype=grad.dtype))\n scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))\n return array_ops.gather(scaled_grad, op.inputs[1]), None\n\n\[email protected](\"SparseSegmentSum\")\ndef _SparseSegmentSumGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSum.\"\"\"\n input_rows = array_ops.shape(op.inputs[0])[0]\n return (math_ops.unsorted_segment_sum(\n array_ops.gather(grad, op.inputs[2]),\n op.inputs[1], input_rows), None, None)\n\n\[email protected](\"SparseSegmentMean\")\ndef _SparseSegmentMeanGrad(op, grad):\n \"\"\"Gradient for SparseSegmentMean.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_mean_grad(grad,\n op.inputs[1],\n op.inputs[2],\n dim0),\n None, None)\n\n\[email protected](\"SparseSegmentSqrtN\")\ndef _SparseSegmentSqrtNGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSqrtN.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_sqrt_n_grad(grad,\n op.inputs[1],\n op.inputs[2],\n dim0),\n None, None)\n\n\ndef _SegmentMinOrMaxGrad(op, grad):\n \"\"\"Gradient for SegmentMin and SegmentMax. Both share the same code.\"\"\"\n zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),\n dtype=op.inputs[0].dtype)\n\n # Get the number of selected (minimum or maximum) elements in each segment.\n gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype),\n op.inputs[1])\n\n # Compute the gradient for each segment. 
The gradient for the ith segment is\n # divided evenly among the selected elements in that segment.\n weighted_grads = math_ops.div(grad, num_selected)\n gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])\n\n return math_ops.select(is_selected, gathered_grads, zeros), None\n\n\[email protected](\"SegmentMin\")\ndef _SegmentMinGrad(op, grad):\n \"\"\"Gradient for SegmentMin.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"SegmentMax\")\ndef _SegmentMaxGrad(op, grad):\n \"\"\"Gradient for SegmentMax.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"UnsortedSegmentSum\")\ndef _UnsortedSegmentSumGrad(op, grad):\n \"\"\"Gradient for SegmentSum.\"\"\"\n return array_ops.gather(grad, op.inputs[1]), None, None\n\n\[email protected](\"Abs\")\ndef _AbsGrad(op, grad):\n x = op.inputs[0]\n return grad * math_ops.sign(x)\n\n\[email protected](\"Neg\")\ndef _NegGrad(_, grad):\n \"\"\"Returns -grad.\"\"\"\n return -grad\n\n\[email protected](\"Inv\")\ndef _InvGrad(op, grad):\n \"\"\"Returns -grad * (1 / x^2).\"\"\"\n y = op.outputs[0] # y = 1 / x\n # pylint: disable=protected-access\n return gen_math_ops._inv_grad(y, grad)\n\n\[email protected](\"InvGrad\")\ndef _InvGradGrad(op, grad):\n b = op.inputs[1]\n # op.output[0]: y = -b * conj(a)^2\n with ops.control_dependencies([grad.op]):\n ca = math_ops.conj(op.inputs[0])\n cg = math_ops.conj(grad)\n # pylint: disable=protected-access\n return cg * -2.0 * b * ca, gen_math_ops._inv_grad(ca, grad)\n\n\[email protected](\"Square\")\ndef _SquareGrad(op, grad):\n x = op.inputs[0]\n # Added control dependencies to prevent 2*x from being computed too early.\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * (2.0 * x)\n\n\[email protected](\"Sqrt\")\ndef _SqrtGrad(op, grad):\n y = op.outputs[0] # y = x^(1/2)\n return gen_math_ops._sqrt_grad(y, grad)\n\n\[email protected](\"SqrtGrad\")\ndef _SqrtGradGrad(op, grad):\n a = op.inputs[0]\n y = op.outputs[0] # y = 0.5 * b / conj(a)\n with ops.control_dependencies([grad.op]):\n ga = grad / a\n return -math_ops.conj(ga) * y, 0.5 * ga\n\n\[email protected](\"Rsqrt\")\ndef _RsqrtGrad(op, grad):\n y = op.outputs[0] # y = x^(-1/2)\n return gen_math_ops._rsqrt_grad(y, grad)\n\n\[email protected](\"Exp\")\ndef _ExpGrad(op, grad):\n \"\"\"Returns grad * exp(x).\"\"\"\n y = op.outputs[0] # y = e^x\n with ops.control_dependencies([grad.op]):\n y = math_ops.conj(y)\n return grad * y\n\n\[email protected](\"Log\")\ndef _LogGrad(op, grad):\n \"\"\"Returns grad * (1/x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * math_ops.inv(x)\n\n\[email protected](\"Tanh\")\ndef _TanhGrad(op, grad):\n \"\"\"Returns grad * (1 - tanh(x) * tanh(x)).\"\"\"\n y = op.outputs[0] # y = tanh(x)\n with ops.control_dependencies([grad.op]):\n y = math_ops.conj(y)\n # pylint: disable=protected-access\n return gen_math_ops._tanh_grad(y, grad)\n\n\[email protected](\"TanhGrad\")\ndef _TanhGradGrad(op, grad):\n with ops.control_dependencies([grad.op]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n # pylint: disable=protected-access\n return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)\n\n\[email protected](\"Erf\")\ndef _ErfGrad(op, grad):\n \"\"\"Returns grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * 
two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Erfc\")\ndef _ErfcGrad(op, grad):\n \"\"\"Returns -grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n minus_two_over_root_pi = constant_op.constant(-2 / np.sqrt(np.pi),\n dtype=grad.dtype)\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Lgamma\")\ndef _LgammaGrad(op, grad):\n \"\"\"Returns grad * digamma(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * math_ops.digamma(x)\n\n\[email protected](\"Digamma\")\ndef _DigammaGrad(op, grad):\n \"\"\"Compute gradient of the digamma function with respect to its argument.\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)\n\n\[email protected](\"Igamma\")\ndef _IgammaGrad(op, grad):\n \"\"\"Returns gradient of igamma(a, x) with respect to a and x.\"\"\"\n # TODO(ebrevdo): Perhaps add the derivative w.r.t. a\n a = op.inputs[0]\n x = op.inputs[1]\n sa = array_ops.shape(a)\n sx = array_ops.shape(x)\n unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)\n\n # Perform operations in log space before summing, because Gamma(a)\n # and Gamma'(a) can grow large.\n partial_x = math_ops.exp(-x + (a-1) * math_ops.log(x) - math_ops.lgamma(a))\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Igammac\")\ndef _IgammacGrad(op, grad):\n \"\"\"Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x.\"\"\"\n return [-1 * g if g is not None else None for g in _IgammaGrad(op, grad)]\n\n\[email protected](\"Zeta\")\ndef _ZetaGrad(op, grad):\n \"\"\"Returns gradient of zeta(x, q) with respect to x and q.\"\"\"\n # TODO(tillahoffmann): Add derivative with respect to x\n x = op.inputs[0]\n q = op.inputs[1]\n # Broadcast gradients\n sx = array_ops.shape(x)\n sq = array_ops.shape(q)\n unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)\n # Evaluate gradient\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n q = math_ops.conj(q)\n partial_q = -x * math_ops.zeta(x + 1, q)\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))\n\n\[email protected](\"Polygamma\")\ndef _PolygammaGrad(op, grad):\n \"\"\"Returns gradient of psi(n, x) with respect to n and x.\"\"\"\n # TODO(tillahoffmann): Add derivative with respect to n\n n = op.inputs[0]\n x = op.inputs[1]\n # Broadcast gradients\n sn = array_ops.shape(n)\n sx = array_ops.shape(x)\n unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)\n # Evaluate gradient\n with ops.control_dependencies([grad.op]):\n n = math_ops.conj(n)\n x = math_ops.conj(x)\n partial_x = math_ops.polygamma(n + 1, x)\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Sigmoid\")\ndef _SigmoidGrad(op, grad):\n \"\"\"Returns grad * sigmoid(x) * (1 - sigmoid(x)).\"\"\"\n y = op.outputs[0] # y = sigmoid(x)\n with ops.control_dependencies([grad.op]):\n y = math_ops.conj(y)\n # pylint: disable=protected-access\n return gen_math_ops._sigmoid_grad(y, grad)\n\n\[email protected](\"SigmoidGrad\")\ndef _SigmoidGradGrad(op, grad):\n with ops.control_dependencies([grad.op]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n gb = grad * b\n # pylint: disable=protected-access\n 
return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)\n\n\[email protected](\"Sign\")\ndef _SignGrad(op, _):\n \"\"\"Returns 0.\"\"\"\n x = op.inputs[0]\n return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)\n\n\[email protected](\"Sin\")\ndef _SinGrad(op, grad):\n \"\"\"Returns grad * cos(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return grad * math_ops.cos(x)\n\n\[email protected](\"Cos\")\ndef _CosGrad(op, grad):\n \"\"\"Returns grad * -sin(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n return -grad * math_ops.sin(x)\n\n\[email protected](\"Tan\")\ndef _TanGrad(op, grad):\n \"\"\"Returns grad * 1/sec^2(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n secx = math_ops.inv(math_ops.cos(x))\n secx2 = math_ops.square(secx)\n return grad * secx2\n\n\[email protected](\"Asin\")\ndef _AsinGrad(op, grad):\n \"\"\"Returns grad * 1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.sub(one, x2))\n inv = math_ops.inv(den)\n return grad * inv\n\n\[email protected](\"Acos\")\ndef _AcosGrad(op, grad):\n \"\"\"Returns grad * -1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.sub(one, x2))\n inv = math_ops.inv(den)\n return -grad * inv\n\n\[email protected](\"Atan\")\ndef _AtanGrad(op, grad):\n \"\"\"Returns grad * 1/ (1 + x^2)\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad.op]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n inv = math_ops.inv(math_ops.add(one, x2))\n return grad * inv\n\n\[email protected](\"AddN\")\ndef _AddNGrad(op, grad):\n \"\"\"Copies the gradient to all inputs.\"\"\"\n # Not broadcasting.\n return [grad] * len(op.inputs)\n\n\[email protected](\"Add\")\ndef _AddGrad(op, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(grad, ry), sy))\n\n\[email protected](\"Sub\")\ndef _SubGrad(op, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),\n array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))\n\n\[email protected](\"Mul\")\ndef _MulGrad(op, grad):\n \"\"\"The gradient of scalar multiplication.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, \" vs. 
\", y.dtype)\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))\n\n\[email protected](\"Div\")\ndef _DivGrad(op, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy) # pylint: disable=protected-access\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n return (array_ops.reshape(math_ops.reduce_sum(grad / y, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(grad *\n (-x / math_ops.square(y)), ry), sy))\n\n\[email protected](\"Pow\")\ndef _PowGrad(op, grad):\n \"\"\"Returns grad * (y*x^(y-1), z*log(x)).\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n z = op.outputs[0]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n z = math_ops.conj(z)\n gx = array_ops.reshape(\n math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)\n # Avoid false singularity at x = 0\n if x.dtype.is_complex:\n # real(x) < 0 is fine for the complex case\n log_x = math_ops.select(\n math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))\n else:\n # There's no sensible real value to return if x < 0, so return 0\n log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))\n gy = array_ops.reshape(\n math_ops.reduce_sum(grad * z * log_x, ry), sy)\n return gx, gy\n\n\ndef _MaximumMinimumGrad(op, grad, selector_op):\n \"\"\"Factor out the code for the gradient of Maximum or Minimum.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xmask = selector_op(x, y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n xgrad = math_ops.select(xmask, grad, zeros)\n ygrad = math_ops.select(math_ops.logical_not(xmask), grad, zeros)\n gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n return (gx, gy)\n\n\[email protected](\"Maximum\")\ndef _MaximumGrad(op, grad):\n \"\"\"Returns grad*(x > y, x <= y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)\n\n\[email protected](\"Minimum\")\ndef _MinimumGrad(op, grad):\n \"\"\"Returns grad*(x < y, x >= y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.less_equal)\n\n\[email protected](\"SquaredDifference\")\ndef _SquaredDifferenceGrad(op, grad):\n \"\"\"Returns the gradient for (x-y)^2.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n # pylint: disable=protected-access\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n # pylint: enable=protected-access\n # .op works with Tensors or IndexedSlices\n with ops.control_dependencies([grad.op]):\n # The parens ensure that if grad is IndexedSlices, it'll get multiplied by\n # Tensor (not a number like 2.0) which causes it to convert to Tensor.\n x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)\n return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),\n -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))\n\n\n# Logical operations have no 
gradients.\nops.NoGradient(\"Less\")\nops.NoGradient(\"LessEqual\")\nops.NoGradient(\"Greater\")\nops.NoGradient(\"GreaterEqual\")\nops.NoGradient(\"Equal\")\nops.NoGradient(\"NotEqual\")\nops.NoGradient(\"LogicalAnd\")\nops.NoGradient(\"LogicalOr\")\nops.NoGradient(\"LogicalNot\")\n\n\[email protected](\"Select\")\ndef _SelectGrad(op, grad):\n c = op.inputs[0]\n x = op.inputs[1]\n zeros = array_ops.zeros_like(x)\n return (None, math_ops.select(c, grad, zeros),\n math_ops.select(c, zeros, grad))\n\n\[email protected](\"MatMul\")\ndef _MatMulGrad(op, grad):\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n if not t_a and not t_b:\n return (math_ops.matmul(grad, op.inputs[1], transpose_b=True),\n math_ops.matmul(op.inputs[0], grad, transpose_a=True))\n elif not t_a and t_b:\n return (math_ops.matmul(grad, op.inputs[1]),\n math_ops.matmul(grad, op.inputs[0], transpose_a=True))\n elif t_a and not t_b:\n return (math_ops.matmul(op.inputs[1], grad, transpose_b=True),\n math_ops.matmul(op.inputs[0], grad))\n elif t_a and t_b:\n return (math_ops.matmul(op.inputs[1], grad, transpose_a=True,\n transpose_b=True),\n math_ops.matmul(grad, op.inputs[0], transpose_a=True,\n transpose_b=True))\n\n\[email protected](\"SparseMatMul\")\ndef _SparseMatMulGrad(op, grad):\n \"\"\"Gradient for SparseMatMul.\"\"\"\n\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n is_sparse = {\n op.inputs[0]: op.get_attr(\"a_is_sparse\"),\n op.inputs[1]: op.get_attr(\"b_is_sparse\"),\n # Use heuristic to figure out if grad might be sparse\n grad: (grad.op.type == \"ReluGrad\")\n }\n def _SparseMatMul(t1, t2, out_dtype,\n transpose_a=False, transpose_b=False):\n \"\"\"Helper function to create SparseMatMul op.\"\"\"\n\n assert t1 in is_sparse and t2 in is_sparse\n t1_sparse = is_sparse[t1]\n t2_sparse = is_sparse[t2]\n if transpose_b:\n t2 = array_ops.transpose(t2)\n transpose_b = False\n prod = math_ops.matmul(t1, t2,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=t1_sparse,\n b_is_sparse=t2_sparse)\n if prod.dtype != out_dtype:\n prod = math_ops.cast(prod, out_dtype)\n return prod\n\n dtype_a = op.inputs[0].dtype\n dtype_b = op.inputs[1].dtype\n if not t_a and not t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))\n elif not t_a and t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a),\n _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))\n elif t_a and not t_b:\n return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b))\n elif t_a and t_b:\n return (_SparseMatMul(op.inputs[1], grad, dtype_a,\n transpose_a=True, transpose_b=True),\n _SparseMatMul(grad, op.inputs[0], dtype_b,\n transpose_a=True, transpose_b=True))\n\n\[email protected](\"Floor\")\ndef _FloorGrad(_, unused_grad):\n return [None]\n\n\[email protected](\"BatchMatMul\")\ndef _BatchMatMul(op, grad):\n \"\"\"Returns the gradient of x and y given the gradient of x * y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n adj_x = op.get_attr(\"adj_x\")\n adj_y = op.get_attr(\"adj_y\")\n\n if not adj_x:\n if not adj_y:\n grad_x = math_ops.batch_matmul(grad, y, False, True)\n grad_y = math_ops.batch_matmul(x, grad, True, False)\n else:\n grad_x = math_ops.batch_matmul(grad, y, False, False)\n grad_y = math_ops.batch_matmul(grad, x, True, False)\n else:\n if not adj_y:\n grad_x = math_ops.batch_matmul(y, grad, False, True)\n grad_y = 
math_ops.batch_matmul(x, grad, False, False)\n else:\n grad_x = math_ops.batch_matmul(y, grad, True, True)\n grad_y = math_ops.batch_matmul(grad, x, True, True)\n\n return grad_x, grad_y\n\n\nops.NoGradient(\"Range\")\nops.NoGradient(\"LinSpace\")\n\n\[email protected](\"Complex\")\ndef _ComplexGrad(op, grad):\n \"\"\"Returns the real and imaginary components of 'grad', respectively.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),\n array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))\n\n\[email protected](\"Real\")\ndef _RealGrad(_, grad):\n \"\"\"Returns 'grad' as the real part and set the imaginary part 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(grad, zero)\n\n\[email protected](\"Imag\")\ndef _ImagGrad(_, grad):\n \"\"\"Returns 'grad' as the imaginary part and set the real part 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(zero, grad)\n\n\[email protected](\"Conj\")\ndef _ConjGrad(_, grad):\n \"\"\"Returns the complex conjugate of grad.\"\"\"\n return math_ops.conj(grad)\n\n\[email protected](\"ComplexAbs\")\ndef _ComplexAbsGrad(op, grad):\n \"\"\"Returns the gradient of ComplexAbs.\"\"\"\n # TODO(b/27786104): The cast to complex could be removed once arithmetic\n # supports mixtures of complex64 and real values.\n return (math_ops.complex(grad, array_ops.zeros_like(grad)) *\n math_ops.sign(op.inputs[0]))\n\n\[email protected](\"Cast\")\ndef _CastGrad(op, grad):\n t = [dtypes.float16, dtypes.float32, dtypes.float64,\n dtypes.bfloat16, dtypes.complex64, dtypes.complex128]\n src_type = op.inputs[0].dtype.base_dtype\n dst_type = grad.dtype.base_dtype\n if src_type in t and dst_type in t:\n return math_ops.cast(grad, src_type)\n else:\n return None\n\n\[email protected](\"FFT\")\ndef _FFTGrad(_, grad):\n size = math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.ifft(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"IFFT\")\ndef _IFFTGrad(_, grad):\n rsize = 1. / math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.fft(grad) * math_ops.complex(rsize, 0.)\n\n\[email protected](\"FFT2D\")\ndef _FFT2DGrad(_, grad):\n size = math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.ifft2d(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"IFFT2D\")\ndef _IFFT2DGrad(_, grad):\n rsize = 1. / math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.fft2d(grad) * math_ops.complex(rsize, 0.)\n\n\[email protected](\"FFT3D\")\ndef _FFT3DGrad(_, grad):\n size = math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"IFFT3D\")\ndef _IFFT3DGrad(_, grad):\n rsize = 1. / math_ops.cast(array_ops.size(grad), dtypes.float32)\n return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)\n\n\ndef _FFTSizeForGrad(grad, rank):\n return math_ops.reduce_prod(array_ops.slice(\n array_ops.reverse(\n array_ops.shape(grad), (True,)), (0,), (rank,)))\n\n\[email protected](\"BatchFFT\")\ndef _BatchFFTGrad(_, grad):\n size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)\n return math_ops.batch_ifft(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"BatchIFFT\")\ndef _BatchIFFTGrad(_, grad):\n rsize = 1. 
/ math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)\n return math_ops.batch_fft(grad) * math_ops.complex(rsize, 0.)\n\n\[email protected](\"BatchFFT2D\")\ndef _BatchFFT2DGrad(_, grad):\n size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)\n return math_ops.batch_ifft2d(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"BatchIFFT2D\")\ndef _BatchIFFT2DGrad(_, grad):\n rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)\n return math_ops.batch_fft2d(grad) * math_ops.complex(rsize, 0.)\n\n\[email protected](\"BatchFFT3D\")\ndef _BatchFFT3DGrad(_, grad):\n size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)\n return math_ops.batch_ifft3d(grad) * math_ops.complex(size, 0.)\n\n\[email protected](\"BatchIFFT3D\")\ndef _BatchIFFT3DGrad(_, grad):\n rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)\n return math_ops.batch_fft3d(grad) * math_ops.complex(rsize, 0.)\n\n\[email protected](\"Cross\")\ndef _CrossGrad(op, grad):\n u = op.inputs[0]\n v = op.inputs[1]\n return (math_ops.cross(v, grad), math_ops.cross(grad, u))\n\n\[email protected](\"Cumsum\")\ndef _CumsumGrad(op, grad):\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n return [math_ops.cumsum(grad, axis, exclusive=exclusive,\n reverse=not reverse), None]\n\n\[email protected](\"Cumprod\")\ndef _CumprodGrad(op, grad):\n x = op.inputs[0]\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n\n # TODO This fails when x contains 0 and should be fixed\n prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)\n out = math_ops.cumsum(prod * grad, axis, exclusive=exclusive,\n reverse=not reverse)\n return [out / x, None]\n"
] | [
[
"tensorflow.python.ops.math_ops.batch_ifft",
"tensorflow.python.ops.array_ops.invert_permutation",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.fft",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.gen_math_ops._inv_grad",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.math_ops.sub",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.gen_math_ops._sigmoid_grad",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.math_ops.polygamma",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cross",
"tensorflow.python.ops.math_ops.inv",
"tensorflow.python.ops.math_ops.digamma",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.fft3d",
"tensorflow.python.ops.math_ops.ifft",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.fft2d",
"tensorflow.python.ops.math_ops.lgamma",
"tensorflow.python.ops.math_ops.log",
"numpy.arange",
"tensorflow.python.ops.math_ops.complex",
"tensorflow.python.ops.math_ops.cumsum",
"numpy.sqrt",
"tensorflow.python.ops.math_ops.real",
"tensorflow.python.ops.math_ops.div",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.gen_math_ops._sqrt_grad",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.batch_fft2d",
"tensorflow.python.ops.math_ops.sign",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.math_ops.sparse_segment_sqrt_n_grad",
"tensorflow.python.ops.math_ops.batch_fft3d",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.math_ops.cumprod",
"tensorflow.python.ops.math_ops.sparse_segment_mean_grad",
"tensorflow.python.ops.math_ops.batch_fft",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.batch_ifft2d",
"tensorflow.python.ops.gen_math_ops._rsqrt_grad",
"tensorflow.python.framework.ops.NoGradient",
"tensorflow.python.ops.math_ops.batch_matmul",
"tensorflow.python.ops.math_ops.reduced_shape",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.ops.array_ops.listdiff",
"tensorflow.python.ops.math_ops.select",
"tensorflow.python.ops.math_ops.ifft2d",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.math_ops.scalar_mul",
"tensorflow.python.ops.math_ops.batch_ifft3d",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.gen_math_ops._tanh_grad",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.math_ops.ifft3d",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_array_ops._broadcast_gradient_args",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.math_ops.conj",
"tensorflow.python.ops.math_ops.zeta",
"tensorflow.python.ops.math_ops.segment_sum",
"tensorflow.python.ops.math_ops.cos",
"tensorflow.python.ops.math_ops.imag"
]
] |
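The `_AddGrad`/`_SubGrad`/`_MulGrad`/`_DivGrad` entries in the row above all share one broadcasting pattern: compute the raw gradient, then `reduce_sum` over the broadcast axes (from `_broadcast_gradient_args`) and `reshape` back to each input's shape. A minimal NumPy sketch of that pattern for `z = x * y` (the helper names and the standalone setting are illustrative, not part of the stored file):

import numpy as np

def mul_grad(x, y, grad):
    # Gradients of z = x * y under NumPy broadcasting, mirroring the
    # reduce_sum/reshape pattern of _MulGrad in the row above.
    def reduce_to_shape(g, shape):
        # Sum out leading axes that broadcasting added ...
        while g.ndim > len(shape):
            g = g.sum(axis=0)
        # ... and axes where the input had size 1.
        for axis, size in enumerate(shape):
            if size == 1:
                g = g.sum(axis=axis, keepdims=True)
        return g.reshape(shape)
    return reduce_to_shape(grad * y, x.shape), reduce_to_shape(grad * x, y.shape)

x = np.ones((4, 1)); y = np.ones((1, 3)); g = np.ones((4, 3))
gx, gy = mul_grad(x, y, g)
print(gx.shape, gy.shape)  # (4, 1) (1, 3)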
Krish-sysadmin/genienlp | [
"3586e4368eb0b0756a772294daedc043ce55454c"
] | [
"genienlp/models/common.py"
] | [
"#\n# Copyright (c) 2018, Salesforce, Inc.\n# The Board of Trustees of the Leland Stanford Junior University\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom torch.jit import Final\nfrom torch.nn import functional as F\n\nINF = 1e10\nEPSILON = 1e-10\n\n\nclass MultiLSTMCell(nn.Module):\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(MultiLSTMCell, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.LSTMCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_0, c_0 = hidden\n h_1, c_1 = [], []\n for i, layer in enumerate(self.layers):\n input = self.dropout(input)\n h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))\n input = h_1_i\n h_1 += [h_1_i]\n c_1 += [c_1_i]\n\n h_1 = torch.stack(h_1)\n c_1 = torch.stack(c_1)\n\n return input, (h_1, c_1)\n\n\n# torch.matmul can't do (4, 3, 2) @ (4, 2) -> (4, 3)\ndef matmul(x, y):\n if x.dim() == y.dim():\n return x @ y\n if x.dim() == y.dim() - 1:\n return (x.unsqueeze(-2) @ y).squeeze(-2)\n return (x @ y.unsqueeze(-2)).squeeze(-2)\n\n\nclass LayerNorm(nn.Module):\n def __init__(self, d_model, eps=1e-6):\n super().__init__()\n self.gamma = nn.Parameter(torch.ones(d_model))\n self.beta = nn.Parameter(torch.zeros(d_model))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\ndef mask(targets, out, squash=True, pad_idx=1):\n mask = targets != pad_idx\n out_mask = mask.unsqueeze(-1).expand_as(out).contiguous()\n if squash:\n out_after = out[out_mask].contiguous().view(-1, out.size(-1))\n else:\n out_after = out * out_mask.float()\n targets_after = targets[mask]\n return out_after, targets_after\n\n\nclass LinearFeedforward(nn.Module):\n def __init__(self, d_in, d_hid, d_out, activation='relu', dropout=0.2):\n super().__init__()\n 
self.feedforward = Feedforward(d_in, d_hid, activation=activation)\n self.linear = Linear(d_hid, d_out)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.dropout(self.linear(self.feedforward(x)))\n\n\nclass Linear(nn.Linear):\n def forward(self, x):\n size = x.size()\n return super().forward(x.contiguous().view(-1, size[-1])).view(*size[:-1], -1)\n\n\nclass Feedforward(nn.Module):\n def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):\n super().__init__()\n if activation is not None:\n self.activation = getattr(torch, activation)\n else:\n self.activation = lambda x: x\n self.linear = Linear(d_in, d_out, bias=bias)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.activation(self.linear(self.dropout(x)))\n\n\nclass CombinedEmbedding(nn.Module):\n project: Final[bool]\n dimension: Final[int]\n\n def __init__(self, numericalizer, pretrained_embeddings, output_dimension, trained_dimension=0, project=True):\n super().__init__()\n self.project = project\n self.pretrained_embeddings = nn.ModuleList(pretrained_embeddings)\n\n dimension = 0\n for embedding in self.pretrained_embeddings:\n dimension += embedding.dim\n\n if trained_dimension > 0:\n self.trained_embeddings = nn.Embedding(numericalizer.num_tokens, trained_dimension)\n dimension += trained_dimension\n else:\n self.trained_embeddings = None\n\n if self.project:\n self.projection = Feedforward(dimension, output_dimension)\n else:\n assert dimension == output_dimension, (dimension, output_dimension)\n self.dimension = output_dimension\n\n def resize_embedding(self, new_vocab_size):\n if self.trained_embeddings is None:\n # we are not training embeddings at all\n return\n dimensions = self.trained_embeddings.weight.shape\n if new_vocab_size == dimensions[0]:\n return\n assert new_vocab_size > dimensions[0], 'Cannot shrink the embedding matrix'\n resized_embeddings = nn.Embedding(new_vocab_size, dimensions[1])\n resized_embeddings.weight.data[0 : dimensions[0], :] = self.trained_embeddings.weight.data\n self.trained_embeddings = resized_embeddings\n\n def _combine_embeddings(self, embeddings):\n\n emb = torch.cat(embeddings, dim=2)\n if self.project:\n emb = self.projection(emb)\n return emb\n\n def forward(self, x, padding=None):\n embedded: List[torch.Tensor] = []\n if self.pretrained_embeddings is not None:\n embedded += [emb(x, padding=padding) for emb in self.pretrained_embeddings]\n\n if self.trained_embeddings is not None:\n trained_vocabulary_size = self.trained_embeddings.weight.size()[0]\n valid_x = torch.lt(x, trained_vocabulary_size)\n masked_x = torch.where(valid_x, x, torch.zeros_like(x))\n output = self.trained_embeddings(masked_x)\n embedded.append(output)\n\n return self._combine_embeddings(embedded)\n\n\nclass LSTMDecoderAttention(nn.Module):\n def __init__(self, dim, dot=False):\n super().__init__()\n self.linear_in = nn.Linear(dim, dim, bias=False)\n self.linear_out = nn.Linear(2 * dim, dim, bias=False)\n self.tanh = nn.Tanh()\n self.mask = None\n self.dot = dot\n\n def applyMasks(self, context_mask):\n # context_mask is batch x encoder_time, convert it to batch x 1 x encoder_time\n self.context_mask = context_mask.unsqueeze(1)\n\n def forward(self, input: torch.Tensor, context: torch.Tensor):\n # input is batch x decoder_time x dim\n # context is batch x encoder_time x dim\n # output will be batch x decoder_time x dim\n # context_attention will be batch x decoder_time x encoder_time\n\n if not self.dot:\n targetT = self.linear_in(input) # batch x 
decoder_time x dim x 1\n else:\n targetT = input\n\n transposed_context = torch.transpose(context, 2, 1)\n context_scores = torch.matmul(targetT, transposed_context)\n context_scores.masked_fill_(self.context_mask, -float('inf'))\n context_attention = F.softmax(context_scores, dim=-1) + EPSILON\n\n # convert context_attention to batch x decoder_time x 1 x encoder_time\n # convert context to batch x 1 x encoder_time x dim\n # context_alignment will be batch x decoder_time x 1 x dim\n context_alignment = torch.matmul(context_attention.unsqueeze(2), context.unsqueeze(1))\n # squeeze out the extra dimension\n context_alignment = context_alignment.squeeze(2)\n\n combined_representation = torch.cat([input, context_alignment], 2)\n output = self.tanh(self.linear_out(combined_representation))\n\n return output, context_attention\n\n\nclass LabelSmoothingCrossEntropy(torch.nn.Module):\n def __init__(self, smoothing):\n super(LabelSmoothingCrossEntropy, self).__init__()\n self.smoothing = smoothing\n\n def forward(self, x, target, ignore_index):\n \"\"\"\n Inputs:\n x: Tensor of shape (N, vocab_size)\n target: Tensor of shape (N, ) where N is batch_size * sequence_length\n ignore_index: this index in the vocabulary is ignored when calculating loss. This is useful for pad tokens.\n Outputs:\n loss: a Tensor of shape (N, )\n \"\"\"\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = (1.0 - self.smoothing) * nll_loss + self.smoothing * smooth_loss\n loss.masked_fill_((target == ignore_index), 0)\n return loss\n"
] | [
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.stack",
"torch.nn.LSTMCell",
"torch.nn.ModuleList",
"torch.nn.Tanh",
"torch.zeros_like",
"torch.nn.functional.log_softmax",
"torch.ones",
"torch.nn.functional.softmax",
"torch.lt",
"torch.transpose",
"torch.matmul",
"torch.nn.Embedding"
]
] |
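A hypothetical smoke test for the `LabelSmoothingCrossEntropy` module stored in the row above (it assumes that class is already in scope; shapes and the `ignore_index` semantics follow its docstring, and index 0 stands in for a pad token here):

import torch

torch.manual_seed(0)
criterion = LabelSmoothingCrossEntropy(smoothing=0.1)  # class from the row above
logits = torch.randn(4, 10)            # (N, vocab_size)
target = torch.tensor([3, 2, 0, 5])    # (N,); 0 plays the pad index here
loss = criterion(logits, target, ignore_index=0)
print(loss.shape)  # torch.Size([4]): per-token losses
print(loss[2])     # tensor(0.): zeroed because target[2] == ignore_index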
TJUdyk/Matrix_RENet | [
"5d066e4e08e412b1f880c63743edfdb72bdc7138"
] | [
"models/resnet.py"
] | [
"import torch.nn as nn\n\n# This ResNet network was designed following the practice of the following papers:\n# TADAM: Task dependent adaptive metric for improved few-shot learning (Oreshkin et al., in NIPS 2018) and\n# A Simple Neural Attentive Meta-Learner (Mishra et al., in ICLR 2018).\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.LeakyReLU(0.1)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.maxpool = nn.MaxPool2d(stride)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n out = self.maxpool(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, args, block=BasicBlock):\n self.inplanes = 3\n super(ResNet, self).__init__()\n\n self.args = args\n self.layer1 = self._make_layer(block, 64, stride=2)\n self.layer2 = self._make_layer(block, 160, stride=2)\n self.layer3 = self._make_layer(block, 320, stride=2)\n self.layer4 = self._make_layer(block, 640, stride=2)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layer1(x)\n\n x = self.layer2(x)\n\n x = self.layer3(x)\n\n x = self.layer4(x)\n\n return x\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d"
]
] |
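A quick shape check for the ResNet-12-style backbone in the row above (assumptions: the `ResNet` class from that code is in scope, `args` is only stored so an empty namespace suffices, and 84x84 inputs as typically used with the few-shot papers it cites):

import torch
from types import SimpleNamespace

model = ResNet(SimpleNamespace())  # `args` is stored but not read in forward()
x = torch.randn(2, 3, 84, 84)      # miniImageNet-sized batch
print(model(x).shape)              # torch.Size([2, 640, 5, 5]): four stride-2 stages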
kaiott/pid-tools | [
"b625a457a4a5f98a32912400ffbc3ff0eb5a7c68"
] | [
"main.py"
] | [
"import PIDUtils as pid\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport control as co\n\n\n# specifications, ep=0 assumed\nte = 2.65\ndelta = 0.1\n\n# Process\nK=7\nprocess_poles = np.array([0, -12])\nprocess_zeros = np.array([])\n\n# Initial guess PID\npid_poles = np.array([0])\npid_zeros = np.array([])\n\n# limit pole\ns = -1.50943+2.0594j\n\n# specifications\nsigma = 4/te\ny_p_x = -np.pi/np.log(delta)\npoles = np.append(process_poles, pid_poles)\nzeros = np.append(process_zeros, pid_zeros)\n\n#argument and modulo criterion to figure out paramters of PID\nz = pid.argument_criterion(s=s, poles=poles, zeros=zeros)\npid.print_text(z=z, s=s, poles=poles, zeros=zeros)\npid_zeros = np.append(pid_zeros, z)\nzeros = np.append(zeros, z)\n\nkC = pid.modulo_criterion(s=s, poles=poles, zeros=zeros, K=K)\nprint(f'solution at kC = { kC:.2f}')\n\nG = K * co.tf(np.poly(process_zeros), np.poly(process_poles))\n\nPI = co.tf(np.poly(pid_zeros),np.poly(pid_poles))\nprint(G)\nprint(PI)\n\nGcl = co.feedback(0.0001*G*PI)\nco.root_locus(Gcl, Plot=True, grid=False)\nGcl=Gcl.minreal(tol=0.01)\nprint(Gcl)\n#co.root_locus(Gcl, Plot=True, grid=False)\n\nPI = kC*PI\nGcl = co.feedback(G*PI)\nGcl=Gcl.minreal(tol=0.01)\nprint(Gcl)\nprint(co.pole(Gcl))\nprint(co.zero(Gcl))\n\npoles_and_zeros = np.append(np.append(np.append(zeros, poles), s), np.conj(s))\nymin=np.min(np.imag(poles_and_zeros)) - 2\nymax=np.max(np.imag(poles_and_zeros)) + 2\nxmin=np.min(np.real(poles_and_zeros)) -2\nxmax=np.max(np.real(poles_and_zeros)) + 2\nplt.ylim(ymin, ymax)\nplt.xlim(xmin, xmax)\n\nfor root in co.pole(Gcl):\n\tplt.plot(np.real(root), np.imag(root), color='red', marker='*')\n\tplt.text(np.real(root)+0.1, np.imag(root) + (ymax-ymin)/20*(1 if np.imag(root) > 0 else -1), f'{root:.2f}', color='red')\n\nx = np.real(s)\ny = np.imag(s)\nplt.fill_between([0, xmax*1.1], ymin*1.1, ymax*1.1, color='red', alpha=0.1)\nplt.fill_between([xmin*1.1, 0], ymax*1.1, [-y_p_x*xmin*1.1, 0], color='red', alpha=0.1)\nplt.fill_between([xmin*1.1, 0], [y_p_x*xmin*1.1, 0], ymin*1.1, color='red', alpha=0.1)\nplt.fill_between([x, 0], [y, 0], [-y, 0], color='red', alpha=0.1)\n\nplt.grid()\nplt.show()"
] | [
[
"numpy.array",
"matplotlib.pyplot.xlim",
"numpy.log",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylim",
"numpy.real",
"numpy.poly",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.show",
"numpy.conj",
"numpy.append",
"numpy.imag"
]
] |
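The limit pole `s = -1.50943+2.0594j` in the row above follows from its specs `te = 2.65` and `delta = 0.1` under the standard second-order rules of thumb; a sanity check (assuming the 2% settling-time bound sigma = 4/te and the overshoot line slope -pi/ln(delta), which is how the row's `sigma` and `y_p_x` are defined):

import numpy as np

te, delta = 2.65, 0.1
sigma = 4 / te                  # real-part bound from the settling-time spec
slope = -np.pi / np.log(delta)  # |imag/real| ratio from the overshoot spec
s = -sigma + 1j * sigma * slope
print(s)                        # (-1.50943...+2.05944...j), matching the row up to rounding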
AlokBharadwaj/ipl-prediction | [
"c4d6afeb6dc7e0bd65366c05ce3b5df030f7c538"
] | [
"ipl_chances.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 25 19:01:42 2020\r\n\r\n@author: Alok\r\n\"\"\"\r\n\r\n# Program to simulate winning probabilites for the remaining matches in IPLT20 2020 edition\r\nfrom utils import shorthand\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom statistics import mode\r\nimport pandas as pd\r\nfrom web_scrape import *\r\nimport seaborn as sns\r\nfrom tabulate import tabulate\r\nfrom datetime import datetime\r\n\r\n'''\r\npast_performance_influence = 'weak'\r\nall_match_stats,_ = get_IPL_statistics(verbose=False)\r\nax = all_match_stats['winner_runs'].plot.kde()\r\nruns = ax.get_children()[0]._x\r\nprob = ax.get_children()[0]._y\r\n\r\nax2 = all_match_stats['win_run_rate'].plot.kde()\r\n\r\nrunrate = ax2.get_children()[0]._x\r\nprobrate = ax2.get_children()[0]._y\r\n\r\n'''\r\n\r\nclass league:\r\n ''' \r\n Creates a league object which have the following properties: \"Teams\". \r\n self.teams is just a list of teams, where each team is of the class 'team'\r\n '''\r\n \r\n def __init__(self,teams):\r\n '''\r\n A python list of team objects\r\n '''\r\n self.teams = teams\r\n \r\n def standings(self):\r\n '''\r\n Gives the current standings of all the teams in the league as a dictionary.\r\n The keys are arranged as per their rank order, which is calculated from both points \r\n and NRR\r\n '''\r\n \r\n team_list = []\r\n for team in self.teams:\r\n team_list.append([team.id,team.point,team.nrr])\r\n \r\n team_list = np.array(team_list)\r\n \r\n sorted_teams = team_list[np.lexsort((team_list[:,2],team_list[:,1]))][::-1]\r\n sorted_teams_with_name = {}\r\n for i,team_row in enumerate(sorted_teams):\r\n team_id = team_row[0]\r\n points = team_row[1]\r\n nrr = team_row[2]\r\n team = self.get_team_from_id(team_id)\r\n sorted_teams_with_name[i] = [team.name,team.point,team.nrr,team]\r\n return sorted_teams_with_name\r\n \r\n def get_team_from_id(self,team_id):\r\n for team in self.teams:\r\n if team.id == team_id:\r\n return team\r\n \r\n def get_team_from_name(self,team_name):\r\n for team in self.teams:\r\n if team.name == team_name:\r\n return team\r\n def copy(self):\r\n copied_list = [team(x.id,x.score,x.nrr,x.name) for x in self.teams]\r\n return league(copied_list)\r\n \r\n def get_remaining_matches(self, remaining_match_dictionary):\r\n remaining_matches = []\r\n for x in schedule_as_of_today.values():\r\n team1 = self.get_team_from_name(x[0])\r\n team2 = self.get_team_from_name(x[1])\r\n prob_team_1 = 0.5\r\n prob_team_2 = 0.5\r\n remaining_matches.append((team1,team2,prob_team_1,prob_team_2))\r\n return remaining_matches\r\n \r\n \r\n \r\nclass team:\r\n ''' \r\n A python class which has the following properties: \r\n id: is a number 0 to 7 to identify a team\r\n point: is a number which gives the total points a team has had in the table\r\n nrr: teh overall NRR a team has achieved until this point\r\n name: the team name given as a string\r\n '''\r\n \r\n def __init__(self,id,initial_point,initial_nrr,name):\r\n self.id = id\r\n self.point = initial_point\r\n self.for_runs = 0\r\n self.against_runs = 0 \r\n self.nrr = initial_nrr\r\n self.name = name\r\n \r\n def add_points(self,point):\r\n self.point += point\r\n \r\n def add_nrr(self,nrr):\r\n self.nrr += nrr\r\n \r\n def copy(self):\r\n return team(self.id, self.point, self.nrr, self.name)\r\n\r\nclass tournament:\r\n '''\r\n the tournament class has the following properties: \r\n matches: This contains the list of all remaining matches in the tourname\r\n \r\n 
\r\n \r\n '''\r\n def __init__(self,matches):\r\n '''\r\n A \"matches\" variable is a list of tuples in the following order: \r\n matches = [(team1,team2,p1,p2),(team3,team4,p3,p4),(team5,team6,p5,p6)...]\r\n \r\n and each tuple is called a 'match' and is explained as below: \r\n (team1,team2,p1,p2)\r\n team1: an object of the class 'team' representing one team which is playing\r\n team2: an object of the class 'team' representing the second team which is playing\r\n p1: the probability that team 1 will win (default 0.5)\r\n p2: the probability that team 2 will win (default 0.5)\r\n ''' \r\n self.matches = matches\r\n \r\n def get_matches(self):\r\n list_of_matches = []\r\n for match in self.matches:\r\n team1 = match[0]\r\n team2 = match[1]\r\n team1_prob = match[2]\r\n team2_prob = match[3]\r\n list_of_matches.append((team1,team2,team1_prob,team2_prob))\r\n return list_of_matches\r\n \r\ndef get_match_result_new(match):\r\n ''' \r\n A function which \"chooses\" the winner of a particular match based on the prior probability\r\n of that match\r\n \r\n suppose a 'match' variable is called with following example: (team1,team2,0.7,0.3)\r\n here the probability of team 1 winning is set as 0.7, while that of team 2 is 0.3. \r\n Thus the function returns team 1, with a probability of 0.7, and team 2 with a \r\n probability of 0.3. If this function is called with the same 'match' for 100 times,\r\n then the function returns team1 70 times, and team2 30 times (approx)\r\n \r\n '''\r\n teams = np.array([match[0],match[1]])\r\n teamA = match[0]\r\n teamB = match[1]\r\n \r\n if past_performance_influence == 'weak':\r\n \r\n teamA_run = int(np.random.choice(runs,1,replace=False,p=prob))\r\n teamB_run = int(np.random.choice(runs,1,replace=False,p=prob))\r\n \r\n if teamA_run > teamB_run:\r\n teamA.for_runs = np.random.choice(runrate,1,replace=False,p=probrate)\r\n \r\n return np.random.choice(teams,1,replace=False,p=probabilities)[0] \r\n\r\ndef get_match_result(match):\r\n ''' \r\n A function which \"chooses\" the winner of a particular match based on the prior probability\r\n of that match\r\n \r\n suppose a 'match' variable is called with following example: (team1,team2,0.7,0.3)\r\n here the probability of team 1 winning is set as 0.7, while that of team 2 is 0.3. \r\n Thus the function returns team 1, with a probability of 0.7, and team 2 with a \r\n probability of 0.3. 
If this function is called with the same 'match' for 100 times,\r\n then the function returns team1 70 times, and team2 30 times (approx)\r\n \r\n '''\r\n teams = np.array([match[0],match[1]])\r\n \r\n probabilities = [match[2],match[3]]\r\n \r\n return np.random.choice(teams,1,replace=False,p=probabilities)[0]\r\n\r\ndef probability_upto_n(teamname,n):\r\n '''\r\n Calculates the probability of a team being in top 'n' positions\r\n '''\r\n \r\n p1 = positions[0].count(teamname) / number_of_scenarios\r\n p2 = positions[1].count(teamname) / number_of_scenarios\r\n p3 = positions[2].count(teamname) / number_of_scenarios\r\n p4 = positions[3].count(teamname) / number_of_scenarios\r\n p5 = positions[4].count(teamname) / number_of_scenarios\r\n p6 = positions[5].count(teamname) / number_of_scenarios\r\n p7 = positions[6].count(teamname) / number_of_scenarios\r\n p8 = positions[7].count(teamname) / number_of_scenarios\r\n \r\n p = [p1,p2,p3,p4,p5,p6,p7,p8]\r\n \r\n return sum(p[:n])\r\n\r\ndef probability_at_n(teamname,n):\r\n '''\r\n Calculates the probability of a team being at the 'n'th position\r\n '''\r\n p = positions[n].count(teamname) / number_of_scenarios\r\n \r\n return p\r\n\r\n\r\n\r\ndef get_scenario(tournament_matches):\r\n \r\n '''\r\n A scenario is one set of results for all the remaining matches of the tournament\r\n The input is tournament matches which is a python list of all remaining matches\r\n For each match the winner and loser is collected and returned as a python dictionary\r\n winners{0:winner1,1: winner2,..} where 0, 1, 2 are just indices \r\n '''\r\n winners = {}\r\n losers = {}\r\n for i,match in enumerate(tournament_matches):\r\n team1 = match[0].name\r\n team2 = match[1].name\r\n nrr = random.uniform(0,2)\r\n winner = get_match_result(match)\r\n loser = [x for x in match[:2] if x.name != winner.name][0]\r\n winners[i] = [(team1,team2),winner.name,winner.id,nrr]\r\n losers[i] = [(team1,team2),loser.name,loser.id,-nrr]\r\n \r\n return winners,losers\r\n \r\nnumber_of_scenarios = 10000\r\nwinners = {}\r\nlosers = {}\r\n\r\nfinal_standings = {}\r\n\r\ncurrent_standings = {}\r\n\r\nstandings_today = get_standings()\r\nschedule_as_of_today = get_schedule()\r\n\r\nteams_now = [team(x[0],x[1],x[2],shorthand(x[3])) for x in standings_today.values()]\r\n\r\nfor i in range(number_of_scenarios): \r\n if i/number_of_scenarios*100 % 10 == 0:\r\n print(str(round(i/number_of_scenarios*100))+\"% of scenarios simulated...\")\r\n \r\n teams = league([x.copy() for x in teams_now])\r\n \r\n if i == 0:\r\n current_standings[i] = teams.standings()\r\n\r\n\r\n '''\r\n After every match, update the index in 'remaining_matches_list' before sending it\r\n '''\r\n remaining_matches_list = teams.get_remaining_matches(schedule_as_of_today)\r\n \r\n ipl2020 = tournament(remaining_matches_list)\r\n\r\n\r\n winners[i],losers[i] = get_scenario(ipl2020.get_matches())\r\n \r\n for win_row in winners[i].values():\r\n winning_team_id = win_row[2]\r\n winning_team = teams.get_team_from_id(winning_team_id)\r\n winning_team.add_points(2)\r\n winning_team.add_nrr(win_row[3])\r\n \r\n for loser_row in losers[i].values():\r\n losing_team_id = loser_row[2]\r\n losing_team = teams.get_team_from_id(losing_team_id)\r\n losing_team.add_nrr(loser_row[3])\r\n \r\n final_standings[i] = teams.standings()\r\n \r\n del teams\r\n \r\n \r\npositions = []\r\nfor position in range(8):\r\n names = []\r\n for standing in final_standings.values():\r\n names.append(standing[position][0])\r\n 
positions.append(names)\r\n\r\n\r\nrank = {}\r\nfor pos in range(8):\r\n teamname = mode(positions[pos])\r\n rank[pos] = [teamname,round(positions[pos].count(teamname)/number_of_scenarios*100,2)]\r\n\r\nteamname_list = [x.name for x in teams_now]\r\n \r\noverall_team_chances = {}\r\nteam_colors = {\r\n 'MI':'blue',\r\n 'DC':'purple',\r\n 'RCB':'red',\r\n 'CSK':'yellow',\r\n 'KKR':'black',\r\n 'RR':'green',\r\n 'PK':'pink',\r\n 'SH':'orange'}\r\n\r\nfor teamname in teamname_list:\r\n team_chances = []\r\n for pos in range(8):\r\n team_chances.append(positions[pos].count(teamname)/number_of_scenarios*100)\r\n team_chances = np.array(team_chances)\r\n overall_team_chances[teamname] = team_chances\r\nx = np.arange(1,9,1)\r\n\r\n\r\nfor teamname in teamname_list:\r\n y = overall_team_chances[teamname]\r\n plt.fill_between(x,y,color=team_colors[teamname],alpha=0.6);\r\n plt.ylabel('Percentage chance')\r\n plt.xlabel('Position')\r\n plt.ylim([0,100])\r\n plt.title('Number of simulations: '+str(number_of_scenarios))\r\n\r\nplt.legend(teamname_list,ncol=3,loc='upper left')\r\n\r\n\r\n## Calculate average score and NRR\r\n\r\naverage_standing = {}\r\n\r\nfor teamname in teamname_list:\r\n avg_score = 0\r\n avg_nrr = 0\r\n for standing in final_standings.values():\r\n [(score,nrr)] = [(x[1],x[2]) for x in standing.values() if x[0] == teamname]\r\n avg_score += score\r\n avg_nrr += nrr\r\n\r\n average_standing[teamname] = [round(avg_score/number_of_scenarios),round(avg_nrr/number_of_scenarios,2)]\r\n\r\nfinal_sorted_teamnames = sorted(average_standing,key=average_standing.get,reverse=True)\r\n\r\n# print('Team\\t\\t\\t Score \\t\\t\\t NRR \\n\\n')\r\n# for teamname in final_sorted_teamnames:\r\n# print(teamname+'\\t\\t\\t'+str(average_standing[teamname][0])+'\\t\\t\\t'+str(average_standing[teamname][1]))\r\n\r\n\r\nteam_chances = {}\r\nfor teamname in final_sorted_teamnames:\r\n team_chances[teamname] = [round(probability_upto_n(teamname,2),2),round(probability_upto_n(teamname,4),2)]\r\n #print(teamname+'\\t\\t\\t'+str(round(top_x_chance(teamname,2),2))+'\\t\\t\\t'+str(round(top_x_chance(teamname,4),2)))\r\n\r\ntopchances_df = pd.DataFrame(team_chances.values(),columns=['top 2','top 4'],index=list(team_chances.keys()))\r\n#display(topchances_df)\r\n\r\ntopchances_df.plot.pie(subplots=True,legend=False,title='Chances of each team qualifying for play-offs', figsize=(20,8))\r\n\r\nfig,ax1 = plt.subplots()\r\nax1.set_xlabel('Position')\r\nax1.set_ylabel('Team')\r\n\r\noverall_chances_df = pd.DataFrame(overall_team_chances.values(),columns=np.arange(1,9,1),index=overall_team_chances.keys())\r\nsns.heatmap(data=overall_chances_df,robust=True,center=50,ax=ax1)\r\n\r\nprint(overall_chances_df)\r\n\r\ntime_now = datetime.now()\r\n# Predictions\r\ntime_string = time_now.strftime(\"%Y_%m_%d-%H_%M\")\r\nfilename = \"predictions/prediction_at_\"+time_string+\".txt\"\r\n\r\nprediction_file = open(filename,\"w\")\r\n\r\n## PRINT OVERALL PERCENTAGE CHANCES\r\nprediction_file.write(\"Predictions as of \"+time_string+\"\\n\")\r\nprediction_file.write(\"----------------------------------- \\n\")\r\nprediction_file.write(\"Overall chances for teams (%) \\n\")\r\nprediction_file.write(\"Team | Position ---> \\n\")\r\nprediction_file.write(\" | \\n\")\r\nprediction_file.write(\" | \\n\")\r\nprediction_file.write(\" V \\n\")\r\nprediction_file.write(tabulate(overall_chances_df,headers='keys',tablefmt='psql'))\r\nprediction_file.write(\"\\n\\n\")\r\n\r\n## PRINT CHANCES FOR TOP 4\r\n\r\nprediction_file.write(\"Chances of 
qualifying for playoffs and being in top two \\n\")\r\nprediction_file.write(tabulate(topchances_df, headers='keys',tablefmt='psql'))\r\nprediction_file.write(\"\\n\\n\")\r\n\r\n## PRINT AVERAGE STANDINGS\r\n\r\nprediction_file.write(\"Likely standings \\n\")\r\nprediction_file.write(tabulate(average_standing, headers='keys',tablefmt='psql'))\r\n\r\nprediction_file.close()"
] | [
[
"numpy.array",
"numpy.random.choice",
"numpy.lexsort",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.fill_between",
"numpy.arange",
"matplotlib.pyplot.ylabel"
]
] |
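The Monte Carlo core of the row above is `get_match_result`: a weighted draw between the two teams of a match, repeated over many simulated seasons. A standalone sketch of just that step (team names and probabilities are illustrative):

import numpy as np

rng = np.random.default_rng(0)
match = ("MI", "CSK", 0.7, 0.3)  # (team1, team2, p1, p2), the row's match tuple layout
wins = {"MI": 0, "CSK": 0}
for _ in range(10_000):          # one draw per simulated scenario
    winner = rng.choice(match[:2], p=match[2:])
    wins[str(winner)] += 1
print(wins)                      # roughly {'MI': 7000, 'CSK': 3000}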
Petr-By/qtpyvis | [
"0b9a151ee6b9a56b486c2bece9c1f03414629efc"
] | [
"dltb/tool/activation.py"
] | [
"\"\"\"A collection of tools for dealing with network activations. This\ncomprises:\n\n* The :py:class:`ActivationTool` that allows to obtain activation values\n from a :py:class:`Network`.\n\n* The :py:class:`ActivationWorker` is a controller for an\n :py:class:`ActivationTool`, allowing to run it asynchronously.\n\n* The :py:class:`ActivationArchive` allows to store activation values\n for later processing.\n\nThe actual tools for working with activations are:\n\n* `ActivationComparison`: Compare activation values collected from\n two layers (same or different size), which can be from same or\n different networks, different epochs in training, etc.\n\n\"\"\"\n\n# standard imports\nfrom abc import ABC, abstractmethod\nfrom typing import List, Tuple, Iterable, Iterator, Set\nfrom typing import Optional, Union, Sequence\nfrom pathlib import Path\nimport os\nimport logging\n\n# third party imports\nimport numpy as np\n\n# toolbox imports\nfrom network import Network, Classifier, ShapeAdaptor, ResizePolicy\nfrom ..network import Layer, Layerlike, layer_key, as_layer\nfrom ..datasource import Datasource, Datafetcher\nfrom ..base.observer import BaseObserver\nfrom ..base.prepare import Preparable\nfrom ..base.store import Storable, FileStorage\nfrom ..base.data import Data\nfrom ..util.array import adapt_data_format, DATA_FORMAT_CHANNELS_FIRST\nfrom ..util import nphelper, formating\nfrom ..config import config\nfrom .highscore import Highscore, HighscoreGroup, HighscoreCollection\nfrom .highscore import HighscoreGroupNumpy\nfrom . import Tool, Worker\n\n# logging\nLOG = logging.getLogger(__name__)\n\n\nclass Fillable(Storable, ABC, storables=['_valid', '_total']):\n \"\"\"A :py:class:`Fillable` object can be incrementally filled.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self._total = 0\n self._valid = 0\n\n def __len__(self) -> int:\n \"\"\"The length of this :py:class:`Fillable` is its\n :py:prop:`valid` size.\n To access the total size, use `:py:prop:total`.\n \"\"\"\n return self.valid\n\n def _post_prepare(self) -> None:\n super()._post_prepare()\n\n if not self.full():\n LOG.warning(\"Fillable is only partly filled (%d/%d)\",\n self._valid, self._total)\n\n @property\n def total(self) -> int:\n \"\"\"The total size of this :py:class:`Fillable`. May be more than the\n :py:prop:`valid`.\n\n \"\"\"\n return self._total\n\n @property\n def valid(self) -> int:\n \"\"\"The valid size of the :py:class:`Fillable` to operate on. 
May be\n less than :py:prop:`total` if just a part has been filled yet.\n\n \"\"\"\n return self._valid\n\n def full(self) -> bool:\n \"\"\"A flag indicating if this :py:class:`ActivationsArchive` archive\n is completely filled, meaning all activation values for the\n :py:class:`Datasource` have been added to the archive.\n \"\"\"\n return self.valid == self.total\n\n def fill(self, overwrite: bool = False) -> None:\n \"\"\"Fill this :py:class:`ActivationsArchive` by computing activation\n values for data from the underlying :py:class:`Datasource`.\n\n Arguments\n ---------\n overwrite:\n If `True`, the fill process will start with the first\n data item, overwriting results from previous runs.\n If `False`, the fill process will start from where the\n last process stopped (if the archive is already filled\n completely, no further computation is started).\n \"\"\"\n if overwrite:\n self._valid = 0\n with self:\n for index in range(self.valid, self.total):\n self.fill_item(index)\n self._valid = index + 1  # one past the last filled item\n\n @abstractmethod\n def fill_item(self, index: int) -> None:\n \"\"\"Fill the given item.\n \"\"\"\n\n\nclass DatasourceTool(Preparable):\n \"\"\"A tool that makes use of a :py:class:`Datasource`.\n\n Properties\n ---------\n datasource: Datasource\n The datasource to process\n \"\"\"\n\n # _datasource:\n # The Datasource for which activation values are computed\n _datasource: Union[str, Datasource] = None\n _datasource_required: bool = True\n\n def __init__(self, datasource: Optional[Union[Datasource, str]] = None,\n layers: Optional[Iterable[Layerlike]] = None,\n **kwargs) -> None:\n super().__init__(**kwargs)\n\n if isinstance(datasource, str):\n self._datasource_key = datasource\n self._datasource = None\n elif isinstance(datasource, Datasource):\n self._datasource_key = datasource.key\n self._datasource = datasource\n else:\n raise ValueError(f\"Invalid type {type(datasource)} \"\n \"for datasource argument.\")\n\n self._layers = layers and [layer_key(layer) for layer in layers]\n\n def _prepare(self) -> None:\n super()._prepare()\n if self._datasource_required:\n self._prepare_datasource()\n\n def _prepare_datasource(self) -> None:\n if self._datasource is None:\n self._datasource = Datasource[self._datasource_key]\n self._datasource.prepare()\n\n @property\n def datasource_key(self) -> str:\n \"\"\"Datasource key.\n \"\"\"\n return self._datasource_key\n\n\nclass NetworkTool(Preparable):\n \"\"\"A :py:class:`NetworkTool` makes use of a :py:class:`Network`.\n \"\"\"\n # _network:\n # The Network by which activation values are obtained.\n _network: Union[str, Network] = None\n _network_required: bool = True\n\n # _layers:\n # The keys of the network layers that are used by this NetworkTool\n _layers: List[str] = None # sequence of layers\n\n def __init__(self, network: Optional[Union[Network, str]] = None,\n layers: Optional[Iterable[Layerlike]] = None,\n **kwargs) -> None:\n super().__init__(**kwargs)\n if isinstance(network, str): # we got a network key\n self._network_key = network\n self._network = None\n elif isinstance(network, Network):\n self._network_key = network.key\n self._network = network\n else:\n raise ValueError(f\"Invalid type {type(network)} \"\n \"for network argument.\")\n\n self._layers = layers and [layer_key(layer) for layer in layers]\n\n @property\n def network_key(self) -> str:\n \"\"\"Network key.\n \"\"\"\n return self._network_key\n\n def _prepare(self) -> None:\n super()._prepare()\n\n if self._network_required:\n self._prepare_network()\n\n if self._layers 
is not None and self._network is not None:\n self.check_layers(self._network.layer_names())\n\n def _prepare_network(self) -> None:\n if self._network is None:\n self._network = Network[self._network_key]\n self._network.prepare()\n\n def layers(self, *what) -> Iterable[Tuple]:\n \"\"\"Iterate over layer information.\n\n Arguments\n ---------\n what:\n Specifies what information should be provided. Valid\n values are: `'name'` the layer name,\n `'layer'` the actual layer object,\n `'shape'` the output shape (without batch axis).\n The values `'layer'` and `'shape'` are only available if the\n :py:class:`Network`, not just the network key, has been provided\n upon initialization of this :py:class:`NetworkTool`.\n \"\"\"\n if self._layers is None:\n return # nothing to do\n if not what:\n what = ('name', )\n elif (('layer' in what or 'shape' in what) and\n not isinstance(self._network, Network)):\n raise ValueError(f\"Iterating over {what} is only possible with \"\n \"an initialized Network.\")\n for layer in self._layers:\n name = layer_key(layer)\n layer = as_layer(layer, self._network)\n values = tuple((name if info == 'name' else\n layer if info == 'layer' else\n layer.output_shape[1:] if info == 'shape' else\n '?')\n for info in what)\n yield values[0] if len(what) == 1 else values\n\n def check_layers(self, layers: Iterable[Layerlike],\n exact: bool = False) -> None:\n \"\"\"Check if the layers requested by the tool (stored in\n :py:prop:`_layers`) are contained in the available layers.\n\n Arguments\n ---------\n layers:\n The available layers.\n exact:\n If `True`, then an exact match is required, if `False`\n it is also ok if :py:prop:`_layers` are a subset of\n `layers`.\n \"\"\"\n # check if all requested layers are available\n available_layers = set(layer_key(layer) for layer in layers)\n requested_layers = set(self._layers)\n\n if exact and (available_layers != requested_layers):\n diff = requested_layers.symmetric_difference(available_layers)\n raise ValueError(f\"Requested layers {requested_layers} and \"\n f\"available layers {available_layers} differ:\"\n f\"{diff}\")\n if not requested_layers.issubset(available_layers):\n raise ValueError(f\"Some requested layers {requested_layers} \"\n f\"are not available {available_layers}\")\n\n\nclass IteratorTool(Storable, storables=['_current_index']):\n \"\"\"The abstract :py:class:`IteratorTool` interface is intended to\n support tools that do iterative processing. It has an internal\n index which will be stored (and restored) if the tool is\n initialized with the `store=True` parameter.\n\n \"\"\"\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self._current_index = 0\n\n @property\n def current_index(self) -> int:\n \"\"\"The current index of this :py:class:`IteratorTool`.\n All indices smaller than the `current_index` already have\n been processed, all indices starting with `current_index`\n still have to be processed.\n \"\"\"\n return self._current_index\n\n def current_range(self, end: int = None) -> Iterator[int]:\n \"\"\"Iterate over the valid indices for this tool, starting from the\n current index up to the end (the length of this tool).\n \"\"\"\n if end is None:\n end = len(self)\n for index in range(self._current_index, end):\n yield index\n self._current_index = index\n\n\nclass DatasourceActivations(DatasourceTool, NetworkTool, IteratorTool, ABC):\n \"\"\"An interface for classes that provide activation values for the data of\n an (indexed) :py:class:`Datasource`. 
The interface allows to\n either access activation values for individual data items,\n identified by their (numerical) index, or to iterate over the\n activation values. Activation values can be obtained for\n individual layers, or for all simultaneously.\n \"\"\"\n\n @abstractmethod\n def __getitem__(self, key) -> Union[np.ndarray, dict]:\n \"\"\"\n Arguments\n ---------\n key:\n Either `index`, or a tuple `(layer, index)`.\n\n Result\n ------\n activations:\n Either a single activation map of `(layer, index)`, or\n a dictionary of activation maps for `index`.\n \"\"\"\n\n def __iter__(self) -> Iterator:\n return self.activations()\n\n def activations(self, layer: str = None) -> Iterator:\n \"\"\"Iterate over the activation values.\n \"\"\"\n for index in self.current_range():\n yield self[layer, index]\n\n\nclass DatasourceNetworkActivations(DatasourceActivations):\n \"\"\"An implementation of the :py:class:`DatasourceActivations` that\n obtains activation values by computing them using a\n :py:class:`Network`.\n \"\"\"\n _network_required: bool = True\n _datasource_required: bool = True\n\n def __getitem__(self, key) -> Union[np.ndarray, dict]:\n \"\"\"\n Arguments\n ---------\n key:\n Either `index`, or a tuple `(layer, index)`.\n\n Result\n ------\n activations:\n Either a single activation map of `(layer, index)`, or\n a dictionary of activation maps for `index`.\n \"\"\"\n layer, index = key if isinstance(key, tuple) else (None, key)\n return self._network.get_activations(self._datasource[index], layer)\n\n\nclass ActivationsArchive(DatasourceActivations, Fillable, Storable, ABC):\n \"\"\"An :py:class:`ActivationsArchive` represents an archive of\n activation values obtained by applying a\n :py:class:`ActivationTool` to a :py:class:`Datasource`.\n\n The total size of an :py:class:`ActivationsArchiveNumpy`, that is\n the number of data points for which activations are stored in the\n archive, has to be provided upon initialization and cannot be\n changed afterwards. This number can be accessed via the property\n :py:prop:`total`. The archive supports incremental updates,\n allowing to fill the archive in multiple steps and to continue\n fill operations that were interrupted. 
The number of valid data\n points filled so far is stored in the metadata as property\n `valid` and can be accessed through the property :py:prop:`valid`.\n\n If no explicit argument is provided, the\n :py:class:`ActivationsArchiveNumpy` uses the value\n `config.activations_directory` as well as the `ActivationTool` and\n `Datasource` identifiers (as provided by the `key` property),\n to construct a directory name.\n\n Use cases\n ---------\n\n Fill the archive by iterating over a :py:class:`Datasource`:\n\n >>> with ActivationsArchive(network, datasource, store=True) as archive:\n >>> archive.fill()\n\n This can also be achieved explicitly:\n\n >>> with ActivationsArchive(network, datasource, store=True) as archive:\n >>> for index in range(archive.valid, archive.total):\n >>> archive += network.get_activations(datasource[index])\n\n Batchwise filling is also supported:\n\n >>> with ActivationsArchive(network, datasource, store=True) as archive:\n >>> for batch in datasource.batches(batch_size=64,start=archive.valid):\n >>> activation_tool.process(batch)\n >>> archive += batch\n\n Once the archive is (partly) filled, it can be used in read only mode:\n\n >>> with ActivationsArchive(network, datasource, mode='r') as archive:\n >>> activations = archive[index, 'layer1']\n >>> batch_activations = archive[index1:index2, 'layer1']\n\n Activations can also be obtained encapsuled in a :py:class:`Data`\n object:\n\n >>> with ActivationsArchive(network, datasource, mode='r') as archive:\n >>> data = archive.data(index, 'layer1')\n\n\n Properties\n ----------\n\n Storable properties\n -------------------\n total: int\n The total number of entries in the underlying :py:class:`Datasource`.\n valid: int\n The number of data entries alread processed.\n _layers: List[str]\n\n Arguments\n ---------\n layers: List[str]\n The layers to be covered by this :py:class:`ActivationsArchive`.\n If opened for reading (restore), this has to be a subset of\n the layers present in the stored archive. If creating a new\n archive, this may be any subset of the layers present in the\n :py:class:`Network` (if `None`, all layers of that network\n will be covered). 
If updating an existing archive, if not `None`,\n the layer list has to exactly match the layers of that archive.\n store: bool\n Open the archive for writing.\n restore: bool\n Open the archive for reading.\n \"\"\"\n\n def __new__(cls, **_kwargs) -> 'ActivationsArchive':\n if cls is ActivationsArchive:\n new_cls = ActivationsArchiveNumpy\n else:\n new_cls = cls\n return super(ActivationsArchive, new_cls).__new__(new_cls)\n\n def __init__(self, **kwargs) -> None:\n # Change the default Storable behaviour (store/restore), to\n # just restore, if no explicit 'store' flag is given:\n if 'store' not in kwargs:\n kwargs['store'] = False\n kwargs['restore'] = True\n super().__init__(**kwargs)\n\n if not isinstance(self._storage, FileStorage):\n directory = Path(config.activations_directory) /\\\n (self._network_key + '-' + self._datasource_key)\n self._storage = FileStorage(directory=directory)\n\n if self._store_flag:\n self._datasource_required = True\n self._network_required = True\n LOG.info(\"ActivationsArchiveNumpy with storage '%s' initalized.\",\n self._storage)\n\n @property\n def directory(self) -> str:\n \"\"\"The directory in which the activation maps are stored.\n \"\"\"\n return self._storage.directory\n\n def _prepare(self) -> None:\n super()._prepare()\n if self._total is None: # new archive\n self._total = len(self._datasource)\n self._valid = 0\n\n def fill_item(self, index: int) -> None:\n \"\"\"Fill the given item.\n \"\"\"\n self[index] = self._network.get_activations(self._datasource[index])\n\n def __iadd__(self, values) -> object:\n \"\"\"Add activation values to this\n :py:class:`ActivationsArchive`.\n\n Arguments\n ---------\n values:\n The activation values to add. Currently only a list or\n dictionary of activation values are supported.\n \"\"\"\n # FIXME[todo]: allow to add a batch of values\n self[self.valid] = values\n self._valid += 1\n return self\n\n\nclass ActivationsArchiveNumpy(ActivationsArchive, DatasourceTool, storables=[\n 'shape', 'dtype']):\n \"\"\"The :py:class:`ActivationsArchiveNumpy` realizes an\n :py:class:`ActivationsArchive` based on the Numpy `memmap`\n mechanism.\n\n All files of the :py:class:`ActivationsArchiveNumpy` are stored in\n the directory :py:prop:`directory`. Each layer gets a separate\n file, called `[LAYER_NAME].dat`. Metadata for the archive are\n stored in JSON format into the file `meta.json`.\n\n\n Notes\n -----\n\n Note: The numpy `memmap` mechanism does not provide means for\n compression. Files are stored uncompressed and may have extreme\n sizes for larger activation maps of datasources.\n\n Note: Depending on the file system, memmap may create files of\n desired size but only allocate disk space while filling the files.\n This may result in an (uncatchable) bus error if the device runs\n out of space.\n\n \"\"\"\n\n def __init__(self, dtype: str = 'float32', **kwargs) -> None:\n self._layers_memmap = None\n super().__init__(**kwargs)\n self.dtype = dtype\n self.shape = None\n LOG.info(\"%s initalized (%s/%s).\", type(self).__name__,\n self._network_key, self._datasource_key)\n\n def layers(self, *what) -> Iterator[Tuple]:\n \"\"\"Iterate over the layer information for the layers covered by this\n :py:class:`ActivationsArchiveNumpy`.\n\n Arguments\n ---------\n what: str\n Specifies the what information should be provided. 
Valid\n values are: `'name'` the layer name,\n `'dtype'` the dtype of the layer,\n `'shape'` the layer layer,\n `'layer'` the actual layer object (only available if the\n :py:class:`Network`, not just the network key, has been provided\n upon initialization of this :py:class:`ActinvationsArchive`).\n \"\"\"\n if self._layers_memmap is None:\n for value in super().layers(*what):\n yield value\n return\n\n if not what:\n what = ('name', )\n elif 'layer' in what and not isinstance(self._network, Network):\n raise ValueError(\"Iterating over Layers is only possible with \"\n \"an initialized Network.\")\n for layer in self._layers:\n name = layer_key(layer)\n memmap = self._layers_memmap[name]\n yield ((name if info == 'name' else\n memmap.dtype if info == 'dtype' else\n memmap.shape[1:] if info == 'shape' else\n memmap.nbytes if info == 'bytes' else\n self._network[name] if info == 'layer' else '?')\n for info in what)\n\n def _prepared(self) -> bool:\n return self._layers_memmap is not None and super()._prepared()\n\n def _prepare(self) -> None:\n super()._prepare()\n\n # make sure that all requested layers are available. In write\n # mode, all available layers should be updated to avoid\n # inconsistent data\n if self._layers is None:\n self._layers = list(self.shape.keys())\n else:\n self.check_layers(self.shape.keys(), exact=self._store_flag)\n\n # prepare the layer memmaps\n memmaps = {}\n dtype = np.dtype(self.dtype)\n for layer in self._layers:\n filename = self._storage.filename(layer + '.dat')\n shape = tuple(self.shape[layer])\n mode = 'r' if not self._store_flag else \\\n ('r+' if filename.exists() else 'w+')\n memmaps[layer] = \\\n np.memmap(filename, dtype=dtype, mode=mode, shape=shape)\n self._layers_memmap = memmaps\n\n LOG.info(\"ActivationsArchiveNumpy with storage '%s' and %d layers and \"\n \"%d/%d entries prepared for store_flag=%s.\", self._storage,\n len(self._layers), self.valid, self.total, self._store_flag)\n\n def _unprepare(self) -> None:\n # close the memmap objects\n if self._layers_memmap is not None:\n for memmap in self._layers_memmap.values():\n del memmap\n self._layers_memmap = None\n self._layers = None\n LOG.info(\"ActivationsArchiveNumpy with storage '%s' unprepared.\",\n self._storage)\n super()._unprepare()\n\n def _store(self) -> None:\n \"\"\"Write unwritten data to the disk. 
This will also update the\n metadata file to reflect the current state of the archive.\n :py:meth:`store` is automatically called when upreparing or\n deleting this :py:class:`ActivationsArchiveNumpy` object.\n \"\"\"\n super()._store()\n if self._layers_memmap is not None:\n for memmap in self._layers_memmap.values():\n memmap.flush()\n\n def _fresh(self) -> None:\n \"\"\"Creating a fresh archive.\n \"\"\"\n super()._fresh()\n self._prepare_network()\n self._prepare_datasource()\n self._total = len(self._datasource)\n self.shape = {}\n\n if self._layers is None:\n self._layers = list(self._network.layer_names())\n total = (self.total,)\n for name, shape in self.layers('name', 'shape'):\n self.shape[name] = total + shape\n\n # We require the storage directory to exist in order\n # to place the memmeap files.\n os.makedirs(self._storage.directory, exist_ok=True)\n\n def __getitem__(self, key) -> None:\n layer, index = key if isinstance(key, tuple) else (None, key)\n\n if layer is None:\n return {layer: memmap[index]\n for layer, memmap in self._layers_memmap.items()}\n\n return self._layers_memmap[layer][index]\n\n def __setitem__(self, key, values) -> None:\n if not self._store_flag:\n raise ValueError(\"Archive is not writable\")\n\n layer, index = key if isinstance(key, tuple) else (None, key)\n\n if self._layers is None:\n raise ValueError(f\"Cannot set item {layer} \"\n \"as no Layers have been initialized \"\n \"for this ActivationsArchiveNumpy.\")\n\n if layer is None:\n if isinstance(values, dict):\n for layer, layer_values in values.items():\n self._update_values(layer, index, layer_values)\n elif isinstance(values, list):\n if len(values) != len(self._layers):\n raise ValueError(\"Values should be a list of length \"\n f\"{len(self._layers)} not {len(values)}!\")\n for layer, layer_values in zip(self._layers, values):\n self._update_values(layer, index, layer_values)\n else:\n raise ValueError(\"Values should be a list (of \"\n f\"length {len(self._layers)} \"\n f\"or a dictionary, not {type(values)}\")\n else:\n self._update_values(layer, index, values)\n\n def _update_values(self, layer: Layerlike, index, value) -> None:\n name = layer_key(layer)\n try:\n self._layers_memmap[name][index] = value\n except KeyError as error:\n raise KeyError(f\"Invalid layer '{name}', valid layers are \"\n f\"{list(self._layers_memmap.keys())}\") from error\n\n def info(self) -> None:\n \"\"\"Output a summary of this :py:class:`ActivationsArchiveNumpy`.\n \"\"\"\n print(f\"Archive with storage {self._storage}: \"\n f\"{self.valid}/{self.total}\")\n total_size = 0\n for name, dtype, shape, size in \\\n self.layers('name', 'dtype', 'shape', 'bytes'):\n print(f\" - {name+':':20s} {str(shape):20s} \"\n f\"of type {str(dtype):10s} [{formating.format_size(size)}]\")\n total_size += size\n print((\"No layers\" if self._layers is None else\n f\"Total {len(self._layers)} layers\") +\n f\" and {formating.format_size(total_size)}\")\n\n\nclass TopActivations(HighscoreCollection, DatasourceTool, NetworkTool,\n IteratorTool, Storable, storables=['_top', 'shape']):\n \"\"\"The :py:class:`TopActivations` stores the top activation values\n for the layers of a :py:class:`Network`.\n\n Iterative usage (the `+=` operator)\n -----------------------------------\n\n In iterative mode, the :py:class:`TopActivations` object will\n use an internal counter for indexing. Activation values can be\n iteratively added using the `+=` operator. 
New activation values\n will get the current internal counter as index value.\n\n >>> with TopActivations(top=5) as top_activations:\n >>> for data in datasource[len(top_activations):]:\n >>> top_activations += network.get_activations(data)\n\n\n Properties\n ----------\n\n top: int\n The number of top activation values to store per channel\n\n layers: Sequence[Layer]\n The layers for which top activation values are stored.\n\n\n \"\"\"\n\n def __init__(self, top: int = 9, **kwargs) -> None:\n super().__init__(**kwargs)\n self.shape = None\n self._top = top\n\n def disciplines(self) -> Iterator[Tuple[str, int]]:\n \"\"\"The disciplines in this high score. A discipline is described\n by a layer name (key) and a channel index.\n \"\"\"\n for layer in self.layers('name'):\n for channel in range(self.shape[layer][-1]):\n # layers('name') already yields layer keys (strings)\n yield (layer, channel)\n\n def highscore(self, discipline: Tuple[str, int]) -> Highscore:\n \"\"\"The highscore for a discipline (identified by layer name and\n channel number).\n \"\"\"\n return self.highscore_group(discipline[0])[discipline[1]]\n\n def highscore_group(self, layer: Layerlike) -> HighscoreGroup:\n \"\"\"The highscore group for a layer.\n \"\"\"\n return self._highscores[layer_key(layer)]\n\n def activations(self, layer: Layerlike,\n channel: slice = ...) -> np.ndarray:\n \"\"\"Top activation values for a given layer and channel.\n\n Arguments\n ---------\n layer:\n The layer for which the top activation values should\n be returned.\n channel:\n The channel in the layer for which top activations are\n to be returned. If no channel is provided, the top activation\n values for all channels of that layer are returned.\n\n Result\n ------\n activations:\n A numpy array providing the activation values. The\n shape will be (top, ) if a channel was specified and\n (channels, top) if no channel was specified.\n \"\"\"\n # highscore_group is a method, not a mapping\n return self.highscore_group(layer).scores[channel]\n\n def indices(self, layer: Layerlike,\n channel: slice = ...) -> np.ndarray:\n \"\"\"Return indices identifying the input stimuli that\n resulted in the top activation values.\n\n Arguments\n ---------\n layer:\n The layer for which the indices of top activation inputs\n should be returned.\n channel:\n The channel in the layer for which the indices are to be\n returned. If no channel is provided, the indices\n of top activation values for all channels of that layer\n are returned.\n\n Result\n ------\n indices:\n A numpy array providing the indices. 
The\n shape will be (top, coordinates, ) if a channel was specified\n and (channels, top, coordinates) if no channel was specified.\n \"\"\"\n return self.highscore_group(layer).owners[channel]\n\n @property\n def filename_meta(self) -> Path:\n \"\"\"The name of the file holding the meta data for this\n :py:class:`TopActivations`.\n \"\"\"\n return self._storage.filename(f'top-{self._top}.json')\n\n def filename_top(self, layer: str) -> Path:\n \"\"\"The name of a file for storing top activation values for\n a given :py:class:`Layer`.\n \"\"\"\n return self._storage.filename(f'top-{self._top}-{layer}.npy')\n\n def _fresh(self) -> None:\n \"\"\"Prepare new meta data.\n \"\"\"\n super()._fresh()\n self._prepare_datasource()\n self._prepare_network()\n\n self.shape = {}\n\n if self._layers is None:\n self._layers = list(self._network.layer_names())\n\n for name, layer in self.layers('name', 'layer'):\n shape = layer.output_shape\n channels = shape[-1]\n indices = len(shape) - 1 # -1 for the channel\n # indices: (channels, top, indices)\n self._highscores[name] = \\\n HighscoreGroupNumpy(top=self._top, size=channels,\n owner_dimensions=indices)\n self.shape[name] = shape\n\n def _unprepare(self) -> None:\n super()._unprepare()\n LOG.info(\"TopActivations with storage '%s' unprepared.\",\n self._storage)\n\n def _store(self) -> None:\n for name in self.layers('name'):\n with self.filename_top(name).open('wb') as outfile:\n self._highscores[name].store(outfile)\n super()._store()\n\n def _restore(self) -> None:\n super()._restore() # this should restore the meta data\n\n for name in self.layers('name'):\n # layer shape: (batch, position..., channel)\n shape = self.shape[name]\n channels = shape[-1]\n # indices: (batch, position...)\n indices = len(shape) - 1 # -1 for the channel\n highscore = HighscoreGroupNumpy(top=self._top, size=channels,\n owner_dimensions=indices)\n with self.filename_top(name).open('rb') as file:\n highscore.restore(file)\n self._highscores[name] = highscore\n\n def __iadd__(self, values) -> object:\n # the index array (containing only one index: the current position)\n # (np.int was removed in NumPy 1.24; the builtin int is equivalent)\n index = np.asarray([self._current_index], dtype=int)\n\n if isinstance(values, dict):\n for layer, layer_values in values.items():\n self._highscores[layer].update(index, layer_values[np.newaxis])\n elif isinstance(values, list):\n if len(values) != len(self._layers):\n raise ValueError(\"Values should be a list of length \"\n f\"{len(self._layers)} not {len(values)}!\")\n for layer, layer_values in zip(self._layers, values):\n self._highscores[layer].update(index, layer_values[np.newaxis])\n else:\n raise ValueError(\"Values should be a list (of \"\n f\"length {len(self._layers)}) \"\n f\"or a dictionary, not {type(values)}\")\n self._current_index += 1\n return self\n\n def fill(self, activations: DatasourceActivations) -> None:\n \"\"\"Fill this :py:class:`TopActivations` from a\n :py:class:`DatasourceActivations` object. 
The object have to\n be compatible, that is the :py:class:`Network` and the\n :py:class:`Datasource` have to agree.\n\n \"\"\"\n if self._datasource_key != activations.datasource_key:\n raise ValueError(\"Incompatible datasoures:\"\n f\"{self.datasource_key} for TopActivations vs.\"\n f\"{activations.datasource_key} for \"\n \"DatasourceActivations\")\n if self.network_key != activations.network_key:\n raise ValueError(\"Incompatible networks:\"\n f\"{self.network_key} for TopActivations vs.\"\n f\"{activations.network_key} for \"\n \"DatasourceActivations\")\n while self._current_index < len(activations):\n self += activations[self._current_index]\n\n def receptive_field(self, layer: Layerlike,\n channel: int, top: int = 0) -> np.ndarray:\n \"\"\"Optain the image patch that causes a top activation of\n the :py:class:`network`.\n\n This function is only available, if :py:class:`Network` and\n :py:class:`Datasource` are available and prepared.\n \"\"\"\n indices = self.indices(layer, channel)[top]\n image = self._datasource[indices[0]]\n return self._network.\\\n extract_receptive_field(layer, indices[1:-1], image)\n\n def info(self) -> None:\n \"\"\"Output a summary of this :py:class:`TopActivations`.\n \"\"\"\n print(f\"TopActivations({self._top}): filled with \"\n f\"{self._current_index} entries from {self._datasource}\")\n\n\n# FIXME[todo]: this is essentially a wraper around Network.\n# Check if we could make the Network itself an ActivationTool\nclass ActivationTool(Tool, Network.Observer):\n \"\"\".. :py:class:: Activation\n\n The :py:class:`Activation` class encompassing network, current\n activations, and the like.\n\n An :py:class:`Activation` tool is :py:class:`Observable`. Changes\n in the :py:class:`Activation` that may affect the computation of\n activation are passed to observers (e.g. workers) calling the\n :py:meth:`Observer.activation_changed` method in order to inform\n them as to the exact nature of the model's change.\n\n **Changes**\n\n network_changed:\n The underlying :py:class:`network.Network` has changed,\n or its preparation state was altered. The new network\n can be accessed vie the :py:attr:`network` property.\n layer_changed:\n The selected set of layers was changed.\n\n Attributes\n ----------\n\n _network: Network\n Currently active network\n\n _layers: List[Layer]\n the layers of interest\n\n _classification: bool\n If True, the model will consider the current model\n as a classifier and record the output of the output layer\n in addition to the current (hidden) layer.\n\n Data processing\n ---------------\n\n The :py:class`ActivationTool` can be applied to `Data` objects. It\n will pass the `Data` object as argument to the underlying\n :py:class:`Network` and will store results as attributes in\n the `Data` object. 
It will use the following attributes:\n\n [tool_name]_activations:\n A dictionary mapping layer names to (numpy) arrays of activation\n values.\n \"\"\"\n _network: Network = None\n\n def __init__(self, network: Network = None, data_format: str = None,\n **kwargs) -> None:\n \"\"\"Create a new ``Engine`` instance.\n\n Parameters\n ----------\n network: Network\n Network providing activation values.\n \"\"\"\n super().__init__(**kwargs)\n\n # adapters\n # FIXME[old]:\n self._shape_adaptor = ShapeAdaptor(ResizePolicy.Bilinear())\n self._channel_adaptor = ShapeAdaptor(ResizePolicy.Channels())\n self._data_format = data_format\n\n # network related\n self.network = network\n\n @property\n def data_format(self) -> str:\n \"\"\"The data format (channel first/channel last/...) to be used by this\n :py:class:`ActivationTool`. If no data format has been set\n for this :py:class:`ActivationTool`, the data format of the\n underlying network will be used.\n \"\"\"\n if self._data_format is not None:\n return self._data_format\n if self._network is not None:\n return self._network.data_format\n return None\n\n #\n # network\n #\n\n def network_changed(self, _network: Network, info: Network.Change) -> None:\n \"\"\"React to changes of the :py:class:`Network`.\n The :py:class:`ActivationTool` is interested when the\n network becomes prepared (or unprepared). We just forward\n these notifications.\n \"\"\"\n LOG.debug(\"Activation.network_changed(%s)\", info)\n if info.state_changed:\n self.change('state_changed')\n\n @property\n def network(self) -> Network:\n \"\"\"Get the currently selected network.\n\n Returns\n -------\n The currently selected network or None if no network\n is selected.\n \"\"\"\n return self._network\n\n @network.setter\n def network(self, network: Network) -> None:\n if network is self._network:\n return # nothing changed\n\n if self._network is not None:\n self.unobserve(self._network)\n self._network = network\n if network is not None:\n interests = Network.Change('state_changed')\n self.observe(network, interests)\n # FIXME[old]: what is this supposed to do?\n if network.prepared and self._shape_adaptor is not None:\n self._shape_adaptor.setNetwork(network)\n self._channel_adaptor.setNetwork(network)\n self.change('tool_changed')\n\n #\n # Tool interface\n #\n\n external_result = ('activations', )\n internal_arguments = ('inputs', 'layer_ids')\n internal_result = ('activations_list', )\n\n def _preprocess(self, inputs: np.ndarray, layer_ids: List[Layer] = None,\n **kwargs) -> Data:\n # pylint: disable=arguments-differ\n # FIXME[todo]: inputs should probably be Datalike\n \"\"\"Preprocess the arguments and construct a Data object.\n \"\"\"\n context = super()._preprocess(**kwargs)\n array = inputs.array if isinstance(inputs, Data) else inputs\n context.add_attribute('inputs', array)\n unlist = False\n if layer_ids is None:\n layer_ids = list(self._network.layer_dict.keys())\n elif not isinstance(layer_ids, list):\n layer_ids, unlist = [layer_ids], True\n context.add_attribute('layer_ids', layer_ids)\n context.add_attribute('unlist', unlist)\n return context\n\n def _process(self, inputs: np.ndarray,\n layers: List[Layer]) -> List[np.ndarray]:\n # pylint: disable=arguments-differ\n \"\"\"Perform the actual operation, that is the computation of\n activation values for given input values.\n\n Arguments\n ---------\n inputs:\n Input data.\n layers:\n A list of layers for which to compute activations.\n \"\"\"\n\n LOG.info(\"ActivationTool: computing activations for data <%s>, \"\n 
\"layers=%s, activation format=%s\",\n inputs.shape, layers, self.data_format)\n\n if self._network is None:\n return None\n\n if not layers:\n return layers\n\n return self._network.get_activations(inputs, layers,\n data_format=self.data_format)\n\n def _postprocess(self, data: Data, what: str) -> None:\n if what == 'activations':\n activations_dict = dict(zip(data.layer_ids, data.activations_list))\n data.add_attribute(what, activations_dict)\n data.add_attribute('activations_dict', activations_dict)\n else:\n super()._postprocess(data, what)\n\n def data_activations(self, data: Data, layer: Optional[Layerlike] = None,\n unit: int = None,\n data_format: str = None) -> np.ndarray:\n \"\"\"Get the precomputed activation values for the current\n :py:class:`Data`.\n\n Arguments\n ---------\n data:\n The :py:class:`Data` object in which precomputed activations\n are stored.\n layer:\n The layer for which activation values should be obtained.\n unit:\n The unit for which activation values should be obtained.\n data_format:\n The data format (channel first or channel last) in which\n activation values should be returned. If `None` (default),\n the default format of this :py:class:`ActivationTool`\n (according to :py:prop:`data_format`) is used.\n\n Result\n ------\n activations:\n The requested activation valeus. The type depends on the\n arguments:\n If no `layer` is specified, the result will be a\n dictionary mapping layer names (`str`) activation values\n (`numpy.ndarray`).\n If a `layer` is specified, the activation values (np.ndarray)\n of that layer are returned.\n If in addtion to `layer` also a `unit` is specified, only\n the activation value(s) for that unit are returned.\n \"\"\"\n # FIXME[todo]: batch processing - add an 'index' argument ...\n\n # activations: dict[layer: str, activation_values: np.ndarray]\n activations = self.get_data_attribute(data, 'activations')\n if activations is None:\n return None\n\n if data_format is None:\n data_format = self.data_format\n elif unit is not None:\n LOG.warning(\"Providing a data_format (%s) has no effect \"\n \"when querying unit activation\", data_format)\n\n if layer is None: # return the full dictionary\n if data_format is self.data_format:\n return activations # no tranformation required\n\n # transform the data format of the activation values\n return list(map(lambda activation:\n adapt_data_format(activation,\n input_format=self._data_format,\n output_format=data_format),\n activations))\n\n activations = activations[layer_key(layer)]\n if data_format is not self.data_format:\n activations = adapt_data_format(activations,\n input_format=self.data_format,\n output_format=data_format)\n\n if unit is None:\n return activations\n\n return (activations[unit] if data_format == DATA_FORMAT_CHANNELS_FIRST\n else activations[..., unit])\n\n @staticmethod\n def top_indices(activations: np.ndarray, top: int = 1,\n sort: bool = False) -> np.ndarray:\n \"\"\"Get the indices of the top activations.\n \"\"\"\n return nphelper.argmultimax(activations, num=top, sort=sort)\n\n @staticmethod\n def top_activations(activations: np.ndarray, top: int = 1,\n sort: bool = False) -> np.ndarray:\n \"\"\"Get the top activation values.\n \"\"\"\n return nphelper.multimax(activations, num=top, sort=sort)\n\n\nclass ActivationWorker(Worker):\n \"\"\"A :py:class:`Worker` specialized to work with the\n :py:class:`ActivationTool`.\n\n layers:\n The layers for which activations shall be computed.\n\n data: (inherited from Worker)\n The current input data\n 
activations: dict\n The activations for the current data\n\n \"\"\"\n\n class Observer(BaseObserver):\n \"\"\"An :py:class:`Observer` of a :py:class:`ActivationWorker`\n should specify which layers should be computed.\n \"\"\"\n\n def layers_of_interest(self, worker) -> Set[Layer]:\n # pylint: disable=no-self-use,unused-argument\n \"\"\"The layers that this :py:class:`Observer` is interested in.\n \"\"\"\n return set()\n\n def __init__(self, network: Network = None, tool: ActivationTool = None,\n **kwargs) -> None:\n if network is not None:\n if tool is not None:\n raise ValueError(\"Cannot use both 'tool' and 'network' \"\n \"for initializing a ActivationWorker\")\n tool = ActivationTool(network)\n super().__init__(tool=tool, **kwargs)\n self._layer_ids = []\n self._fixed_layers = []\n self._classification = False\n\n self._activations = None\n\n #\n # Tool core functions\n #\n\n def _apply_tool(self, data: Data, **kwargs) -> None:\n \"\"\"Apply the :py:class:`ActivationTool` on the given data.\n \"\"\"\n self.tool.apply(self, data, layers=self._layer_ids, **kwargs)\n\n def activations(self, layer: Layer = None, unit: int = None,\n data_format: str = None) -> np.ndarray:\n \"\"\"Get the precomputed activation values for the current\n :py:class:`Data`.\n \"\"\"\n activations = \\\n self._tool.data_activations(self._data, layer=layer, unit=unit,\n data_format=data_format)\n LOG.debug(\"ActivationWorker.activations(%s,unit=%s,data_format=%s):\"\n \" %s\", layer, unit, data_format,\n None if activations is None else\n len(activations) if layer is None else activations.shape)\n return activations\n\n def _ready(self) -> bool:\n # FIXME[hack]\n return (super()._ready() and\n self._tool.network is not None and\n self._tool.network.prepared)\n\n @property\n def network(self) -> Network:\n \"\"\"The network employed by this :py:class:`ActivationWorker`.\n \"\"\"\n return self._tool.network\n\n # FIXME[todo]: should be renamed or become a setter\n def set_network(self, network: Network,\n layers: List[Layer] = None) -> None:\n \"\"\"Set the current network. 
Update will only be published if\n not already selected.\n\n Parameters\n ----------\n network : str or int or network.network.Network\n Key for the network\n \"\"\"\n LOG.info(\"Engine.set_network(%s): old=%s\", network, self._network)\n if network is not None and not isinstance(network, Network):\n raise TypeError(\"Expecting a Network, \"\n f\"not {type(network)} ({network})\")\n\n if self._tool is None:\n raise RuntimeError(\"Trying to set a network \"\n \"without having a Tool.\")\n\n self._tool.network = network\n\n # set the layers (this will also trigger the computation\n # of the activations)\n self.set_layers(layers)\n self.change(tool_changed=True)\n\n #\n # Layer configuration\n #\n\n def set_layers(self, layers: List[Layer]) -> None:\n \"\"\"Set the layers for which activations shall be computed.\n\n \"\"\"\n self._fixed_layers = \\\n layers if isinstance(layers, list) else list(layers)\n self._update_layers()\n\n def add_layer(self, layer: Union[str, Layer]) -> None:\n \"\"\"Add a layer to the list of activation layers.\n \"\"\"\n if isinstance(layer, str):\n self._fixed_layers.append(self.network[layer])\n elif isinstance(layer, Layer):\n self._fixed_layers.append(layer)\n else:\n raise TypeError(\"Invalid type for argument layer: {type(layer)}\")\n self._update_layers()\n\n def remove_layer(self, layer: Layer) -> None:\n \"\"\"Remove a layer from the list of activation layers.\n \"\"\"\n self._fixed_layers.remove(layer)\n self._update_layers()\n\n def set_classification(self, classification: bool = True) -> None:\n \"\"\"Record the classification results. This assumes that the network\n is a classifier and the results are provided in the last\n layer.\n \"\"\"\n if classification != self._classification:\n self._classification = classification\n self._update_layers()\n\n def _update_layers(self) -> None:\n network = self.network\n if network is None or not network.prepared:\n return # nothing to do\n\n layers = set()\n # FIXME[problem]: does not work with QObserver:\n # 'QObserverHelper' object has no attribute 'layers_of_interest'\n # for observer in self._observers:\n # layers |= observer.layers_of_interest(self)\n layer_ids = set(map(layer_key, layers))\n\n layer_ids |= set(map(layer_key, self._fixed_layers))\n if self._classification and isinstance(network, Classifier):\n layer_ids |= {network.score_layer.key}\n\n # from set to list\n layer_ids = [layer_key for layer in network.layer_names()\n if layer_key in layer_ids]\n\n got_new_layers = layer_ids > self._layer_ids and self._data is not None\n self._layer_ids = layer_ids\n if got_new_layers:\n self.work(self._data)\n\n #\n # work on Datasource\n #\n\n def extract_activations(self, datasource: Datasource,\n batch_size: int = 128) -> None:\n \"\"\"Compute network activation values for data from a\n :py:class:`Datasource`.\n\n Activation values are stored in a variable called `result`.\n \"\"\"\n samples = len(datasource)\n # Here we could:\n # np.memmap(filename, dtype='float32', mode='w+',\n # shape=(samples,) + network[layer].output_shape[1:])\n results = {\n layer: np.ndarray((samples,) +\n self.tool.network[layer].output_shape[1:])\n for layer in self._layer_ids\n }\n\n fetcher = Datafetcher(datasource, batch_size=batch_size)\n\n try:\n index = 0\n for batch in fetcher:\n print(\"dl-activation: \"\n f\"processing batch of length {len(batch)} \"\n f\"with elements given as {type(batch.array)}, \"\n f\"first element having index {batch[0].index} and \"\n f\"shape {batch[0].array.shape} [{batch[0].array.dtype}]\")\n # 
self.work() will make `batch` the current data object\n # of this Worker (self._data) and store activation values\n # as attributes of that data object:\n self.work(batch, run=False)\n\n # obtain the activation values from the current data object\n activations = self.activations()\n\n # print(type(activations), len(activations))\n print(\"dl-activation: activations are of type \"\n f\"{type(activations)} of length {len(activations)}\")\n if isinstance(activations, dict):\n for index, (layer, values) in \\\n enumerate(activations.items()):\n print(f\"dl-activation: [{index}]: {values.shape}\")\n results[layer][index:index+len(batch)] = values\n elif isinstance(activations, list):\n print(\"dl-activation: \"\n f\"first element is {type(activations[0])} \"\n f\"with shape {activations[0].shape} \"\n f\"[{activations[0].dtype}]\")\n for index, values in enumerate(activations):\n print(f\"dl-activation: [{index}]: {values.shape}\")\n layer = self._layer_ids[index]\n results[layer][index:index+len(batch)] = values\n print(\"dl-activation: batch finished in \"\n f\"{self.tool.duration(self._data)*1000:.0f} ms.\")\n except KeyboardInterrupt:\n # print(f\"error procesing {data.filename} {data.shape}\")\n print(\"Keyboard interrupt\")\n # self.output_status(top, end='\\n')\n except InterruptedError:\n print(\"Interrupted.\")\n finally:\n print(\"dl-activation: finished processing\")\n # signal.signal(signal.SIGINT, original_sigint_handler)\n # signal.signal(signal.SIGQUIT, original_sigquit_handler)\n\n def iterate_activations(self, datasource: Datasource,\n batch_size: int = 128) -> Iterator:\n \"\"\"Iterate over a :py:class:`Datasource` and compute activation\n values for all :py:class:`Data` from that source.\n\n \"\"\"\n\n fetcher = Datafetcher(datasource, batch_size=batch_size)\n\n index = 0\n for data in fetcher:\n print(\"iterate_activations: \"\n f\"processing {'batch' if data.is_batch else 'data'}\")\n self.work(data, run=False)\n activations = self.activations()\n if data.is_batch:\n for index, _view in enumerate(data):\n yield {layer: activations[layer][index]\n for layer in activations}\n else:\n yield activations\n\n\n#\n#\n# OLD\n#\n#\n\n\nclass OldTopActivations(TopActivations):\n \"\"\"FIXME[old]: old methods, probably can be removed ...\n \"\"\"\n activations = None\n index_batch_start = None\n _top_indices = None\n _top_activations = None\n _fixed_layers = None\n _data = None\n\n def _old_update_values(self, layer: str, value: np.ndarray,\n index: np.ndarray = None) -> None:\n \"\"\"Update the top activation lists with new values.\n\n Arguments\n ---------\n layer:\n The layer for which top activation values should be\n updated.\n value:\n The activation map. This is expected to be of shape\n (batch, position..., channel).\n index:\n The index of the activation value in the\n :py:class:`Datasource`-\n \"\"\"\n layer = self._network[layer]\n\n # slim_values have shape (batch*position..., channel)\n slim_values = value.reshape((-1, value.shape[-1]))\n\n # top_slim: array of shape (top, channel),\n # containing the indices in the value array for the top elements\n # for each channel (i.e. 
values from 0 to len(slim_values))\n top = min(self._top, len(slim_values))\n top_slim = nphelper.argmultimax(slim_values, num=top, axis=0)\n\n # top_activations: (top, channel)\n # top_activations = np.take_along_axis(slim_values, top_slim, axis=0)\n\n # the index shape is (batch, positions...), without channel\n shape = (len(value), ) + layer.output_shape[1:-1]\n\n # top_indices have index as (batch, position..., channel)\n # indices * (top, channel)\n # -> (indices, top, channel)\n # -> (channel, top, indices)\n top_indices = np.stack(np.unravel_index(top_slim, shape)).T\n\n # adapt the batch index\n if index is not None:\n top_indices[:, :, 0] = index[top_indices[:, :, 0]]\n\n def old_merge_layer_top_activations(self, layer: Layer, top: int = None):\n \"\"\"Old merge_layer_top_activations implementation\n \"\"\"\n # channel last (batch, height, width, channel)\n new_activations = \\\n self.activations(layer).reshape(-1, self.actviations.shape[-1])\n\n batch_len = len(new_activations)\n data_len = batch_len // self.actviations.shape[0]\n start_index = self.index_batch_start * data_len\n\n # activations has shape (batch, classes)\n batch = np.arange(batch_len)\n if top is None:\n top_indices = np.argmax(new_activations, axis=-1)\n else:\n # Remark: here we could use np.argsort(-class_scores)[:n]\n # but that may be slow for a large number classes,\n # as it does a full sort. The numpy.partition provides a faster,\n # though somewhat more complicated method.\n top_indices_unsorted = \\\n np.argpartition(-new_activations, top)[batch, :top]\n order = \\\n np.argsort((-new_activations)[batch, top_indices_unsorted.T].T)\n new_top_indices = top_indices_unsorted[batch, order.T].T\n\n if not start_index:\n self._top_indices[layer] = new_top_indices\n self._top_activations[layer] = new_activations[top_indices]\n else:\n merged_indices = np.append(self._top_indices[layer],\n new_top_indices + start_index)\n merged_activations = np.append(self._top_activations[layer],\n new_activations[top_indices])\n\n sort = np.argsort(merged_activations)\n self._top_indices[layer] = merged_indices[:sort]\n self._top_activations[layer] = merged_activations[:sort]\n\n @staticmethod\n def old_top_activations(activations: np.ndarray, top: int = 9,\n datasource_index: int = None) -> None:\n \"\"\"Get the top activattion values and their indices in a\n batch of activation maps.\n\n Arguments\n ---------\n activations:\n A batch of activation maps of shape\n (batch, position..., channels).\n top:\n The number of top values to extract.\n datasource_index:\n\n Result\n ------\n top_activations:\n This is an array of shape (top, channels)\n top_indices:\n This is an array of shape (top, 2, channels).\n [n,0,channel] is the index of the datapoint in the datasource,\n while [n,1,channel] is the (1-dimensional) index in the\n activation map. This second index may have to be unraveled\n to obtain real activation map coordinates.\n \"\"\"\n # remember the original shape\n shape = activations.shape\n\n # flatten activations per channel\n # ([batch,] position..., channel) -> (indices, channel)\n activations = np.reshape(activations, (-1, shape[-1]))\n\n # get indices for top activations per channel, shape: (top, channels)\n # Remark: here we could use np.argsort(-class_scores)[:n]\n # but that may be slow for a large number classes,\n # as it does a full sort. 
The numpy.partition provides a faster,\n # though somewhat more complicated method.\n top_indices_unsorted = \\\n np.argpartition(-activations, top, axis=0)[:top]\n\n # get correspondig (unsorted) top activations: shape (top, channels)\n top_activations = \\\n activations[np.arange(top), top_indices_unsorted.T].T\n\n if isinstance(datasource_index, np.ndarray):\n # working on a batch:\n # math.prod ist only available from 3.8 onward ...\n # batch_shape = (shape[0], math.prod(shape[1:-1]))\n batch_shape = (shape[0], np.prod(shape[1:-1]))\n # batch_shape = \\\n # (shape[0], functools.reduce(operator.mul, shape[1:-1]))\n # pylint: disable=unbalanced-tuple-unpacking\n batch_indices, position_indices = \\\n np.unravel_index(top_indices_unsorted, batch_shape)\n datasource_indices = datasource_index[batch_indices]\n top_indices = np.append(datasource_indices[:, np.newaxis],\n position_indices[:, np.newaxis], axis=1)\n else:\n # working on activations for a single input:\n position_indices = top_indices_unsorted[:, np.newaxis]\n datasource_indices = \\\n np.full(position_indices.shape, datasource_index, np.int)\n # shape: (top, 2, channels)\n top_indices = \\\n np.append(datasource_indices, position_indices, axis=1)\n\n return top_activations, top_indices\n\n @staticmethod\n def old_merge_top_activations(top_activations: np.ndarray,\n top_indices: np.ndarray,\n new_activations: np.ndarray,\n new_indices: np.ndarray) -> None:\n \"\"\"Merge activation values into top-n highscore. Both activation data\n consists of two arrays, the first (top_activations) the\n holding the actual activation values and the second\n (top_indices) holding the corresponding indices of the top\n scores.\n\n Arguments\n ---------\n top_activations:\n activation values of shape (top, channels)\n\n top_indices:\n corresponding indices in dataset / position of shape\n (top, 2, channels)\n\n new_activations:\n activation values of shape (top, channels)\n new_indices:\n corresponding indices in dataset / position of shape\n (top, 2, channels)\n\n \"\"\"\n top = len(top_activations)\n merged_indices = np.append(top_indices, new_indices)\n merged_activations = np.append(top_activations, new_activations)\n sort = np.argsort(-merged_activations, axis=0)\n top_indices[:] = merged_indices[sort[:top]]\n top_activations[:] = merged_activations[sort[:top]]\n\n def old_init_layer_top_activations(self, layers=None,\n top: int = 9) -> None:\n \"\"\"old implementation of init_layer_top_activations\n \"\"\"\n if layers is None:\n layers = self._fixed_layers\n for layer in layers:\n self._top_activations[layer] = \\\n np.full((layer.filters, layer.filters), -np.inf)\n # index: (datasource index, fiter index)\n self._top_indices[layer] = \\\n np.full((layer.filters, 2, layer.filters),\n np.nan, dtype=np.int)\n\n def old_update_layer_top_activations(self, layers=None,\n top: int = 9) -> None:\n \"\"\"old implementation of update_layer_top_activations\n \"\"\"\n if layers is None:\n layers = self._fixed_layers\n for layer in layers:\n top_activations, top_indices = \\\n self._top_activations(self.activations(layer),\n datasource_index=self._data.index)\n self._old_merge_top_activations(self._top_activations[layer],\n self._top_indices[layer],\n top_activations, top_indices)\n\n @staticmethod\n def _old_merge_top(target_owners: np.ndarray, target_scores: np.ndarray,\n new_owners: np.ndarray, new_scores: np.ndarray) -> None:\n \"\"\"me\n \"\"\"\n # indices: shape = (size, top, indices)\n # values: shape = (size, top)\n top = 
target_scores.shape[1]\n indices = np.append(target_owners, new_owners, axis=1)\n values = np.append(target_scores, new_scores, axis=1)\n\n # top_indices: shape = (size, top)\n top_indices = nphelper.argmultimax(values, top, axis=1)\n target_scores[:] = np.take_along_axis(values, top_indices, axis=1)\n # FIXME[bug]: ValueError: `indices` and `arr` must have the\n # same number of dimensions\n # target_owners[:] = np.take_along_axis(indices, top_indices, axis=1)\n for coordinate in range(target_owners.shape[-1]):\n target_owners[:, :, coordinate] = \\\n np.take_along_axis(indices[:, :, coordinate],\n top_indices, axis=1)\n\n\nActivationslike = np.ndarray\n\n\nclass ActivationComparison:\n \"\"\"Compare activation values collected from two layers (same or\n different size), which can be from same or different networks,\n different epochs in training, etc.\n\n \"\"\"\n\nclass ActivationProbe:\n \"\"\"Probe the performance achievable from given activation activation\n values in a supervised learning task\n \"\"\"\n\n def train(self, activations: Activationslike, labels, **kwargs) -> None:\n \"\"\"Train the activation probe on a given supervised\n learning problem.\n\n Arguments\n ---------\n activations:\n The activation values used as input values for training.\n labels:\n Corresponding labels for the activation values.\n \"\"\"\n\n def accuracy(self, activations: Activationslike, labels,\n **kwargs) -> float:\n \"\"\"Apply the probe to an evaluation set to obtain the probe accuracy.\n\n Arguments\n ---------\n activations:\n The activation values used as input values the evaluation.\n labels:\n Corresponding labels for the activation values.\n \"\"\"\n"
] | [
[
"numpy.full",
"numpy.reshape",
"numpy.asarray",
"numpy.unravel_index",
"numpy.ndarray",
"numpy.take_along_axis",
"numpy.arange",
"numpy.memmap",
"numpy.argsort",
"numpy.append",
"numpy.argmax",
"numpy.argpartition",
"numpy.prod",
"numpy.dtype"
]
] |
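The `ActivationsArchiveNumpy` class in the deeptensor file above persists one uncompressed `.dat` file per layer via `numpy.memmap`, reopening existing files in `'r+'` mode so that interrupted fill runs can resume. A minimal standalone sketch of that storage pattern follows; the helper name `open_layer_archive` and the paths are hypothetical illustrations, not part of the repo's API:

```python
import numpy as np
from pathlib import Path


def open_layer_archive(directory: Path, layer: str, shape: tuple,
                       dtype: str = "float32",
                       writable: bool = False) -> np.memmap:
    """Open (or create) the memmap file holding one layer's activations.

    `shape` is (total_items, *activation_shape), the same layout that
    ActivationsArchiveNumpy uses for its per-layer '.dat' files.
    """
    directory.mkdir(parents=True, exist_ok=True)
    filename = directory / f"{layer}.dat"
    # 'r+' resumes an existing archive, 'w+' allocates a fresh file;
    # read-only access uses plain 'r'.
    mode = ("r+" if filename.exists() else "w+") if writable else "r"
    return np.memmap(filename, dtype=np.dtype(dtype), mode=mode, shape=shape)


# Fill incrementally and flush, so a partial run survives interruption.
archive = open_layer_archive(Path("/tmp/acts"), "conv1", (100, 8, 8, 64),
                             writable=True)
archive[0] = np.random.rand(8, 8, 64).astype("float32")
archive.flush()
```

Because `memmap` writes raw bytes with no compression, each layer file occupies exactly `total * prod(activation_shape) * itemsize` bytes, which is why the class docstring above warns about disk usage for large activation maps.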
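The same file's `old_top_activations` helper selects per-channel top-k activations with `np.argpartition` instead of a full sort, and maps flat positions back to coordinates with `np.unravel_index`. A self-contained sketch of that selection trick (the function name `top_k_per_channel` is invented for illustration):

```python
import numpy as np


def top_k_per_channel(activations: np.ndarray, k: int = 9):
    """Return (values, flat_indices) of the k largest activations per channel.

    `activations` has shape (position..., channels); positions are flattened,
    so the returned indices can be unraveled with np.unravel_index. Requires
    k to be smaller than the number of positions.
    """
    flat = activations.reshape(-1, activations.shape[-1])  # (positions, channels)
    # argpartition is O(n) per channel; negate to select maxima.
    idx = np.argpartition(-flat, k, axis=0)[:k]            # (k, channels), unsorted
    vals = np.take_along_axis(flat, idx, axis=0)           # (k, channels)
    order = np.argsort(-vals, axis=0)                      # sort only the k winners
    return (np.take_along_axis(vals, order, axis=0),
            np.take_along_axis(idx, order, axis=0))


acts = np.random.rand(7, 7, 32)
values, indices = top_k_per_channel(acts, k=5)
print(values.shape, indices.shape)  # (5, 32) (5, 32)
```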
jerrysong1324/lux | [
"b01f6f47f02340e28332863a4fba573539986767"
] | [
"lux/vislib/altair/AltairRenderer.py"
] | [
"# Copyright 2019-2020 The Lux Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport lux\nimport pandas as pd\nfrom typing import Callable\nfrom lux.vislib.altair.BarChart import BarChart\nfrom lux.vislib.altair.ScatterChart import ScatterChart\nfrom lux.vislib.altair.LineChart import LineChart\nfrom lux.vislib.altair.Histogram import Histogram\nfrom lux.vislib.altair.Heatmap import Heatmap\nfrom lux.vislib.altair.Choropleth import Choropleth\n\n\nclass AltairRenderer:\n \"\"\"\n Renderer for Charts based on Altair (https://altair-viz.github.io/)\n \"\"\"\n\n def __init__(self, output_type=\"VegaLite\"):\n self.output_type = output_type\n\n def __repr__(self):\n return f\"AltairRenderer\"\n\n def create_vis(self, vis, standalone=True):\n \"\"\"\n Input Vis object and return a visualization specification\n\n Parameters\n ----------\n vis: lux.vis.Vis\n Input Vis (with data)\n standalone: bool\n Flag to determine if outputted code uses user-defined variable names or can be run independently\n Returns\n -------\n chart : altair.Chart\n Output Altair Chart Object\n \"\"\"\n # Lazy Evaluation for 2D Binning\n if vis.mark == \"scatter\" and vis._postbin:\n vis._mark = \"heatmap\"\n from lux.executor.PandasExecutor import PandasExecutor\n\n PandasExecutor.execute_2D_binning(vis)\n # If a column has a Period dtype, or contains Period objects, convert it back to Datetime\n if vis.data is not None:\n for attr in list(vis.data.columns):\n if pd.api.types.is_period_dtype(vis.data.dtypes[attr]) or isinstance(\n vis.data[attr].iloc[0], pd.Period\n ):\n dateColumn = vis.data[attr]\n vis.data[attr] = pd.PeriodIndex(dateColumn.values).to_timestamp()\n if pd.api.types.is_interval_dtype(vis.data.dtypes[attr]) or isinstance(\n vis.data[attr].iloc[0], pd.Interval\n ):\n vis.data[attr] = vis.data[attr].astype(str)\n if isinstance(attr, str):\n if \".\" in attr:\n attr_clause = vis.get_attr_by_attr_name(attr)[0]\n # Suppress special character \".\", not displayable in Altair\n # attr_clause.attribute = attr_clause.attribute.replace(\".\", \"\")\n vis._vis_data = vis.data.rename(columns={attr: attr.replace(\".\", \"\")})\n if vis.mark == \"histogram\":\n chart = Histogram(vis)\n elif vis.mark == \"bar\":\n chart = BarChart(vis)\n elif vis.mark == \"scatter\":\n chart = ScatterChart(vis)\n elif vis.mark == \"line\":\n chart = LineChart(vis)\n elif vis.mark == \"heatmap\":\n chart = Heatmap(vis)\n elif vis.mark == \"geographical\":\n chart = Choropleth(vis)\n else:\n chart = None\n\n if chart:\n if lux.config.plotting_style and (\n lux.config.plotting_backend == \"vegalite\" or lux.config.plotting_backend == \"altair\"\n ):\n chart.chart = lux.config.plotting_style(chart.chart)\n if self.output_type == \"VegaLite\":\n chart_dict = chart.chart.to_dict()\n # this is a bit of a work around because altair must take a pandas dataframe and we can only generate a luxDataFrame\n # chart[\"data\"] = { \"values\": vis.data.to_dict(orient='records') }\n # chart_dict[\"width\"] = 160\n # chart_dict[\"height\"] = 
150\n chart_dict[\"vislib\"] = \"vegalite\"\n return chart_dict\n elif self.output_type == \"Altair\":\n import inspect\n\n source = \"\"\n if lux.config.plotting_style:\n if \"def custom_config(chart):\" in lux.config.plotting_style_code:\n source = lux.config.plotting_style_code\n else:\n source = inspect.getsource(lux.config.plotting_style)\n default_vis_style_code = \"# Default Lux Style \\nchart = chart.configure_title(fontWeight=500,fontSize=13,font='Helvetica Neue')\\n\"\n default_vis_style_code += \"chart = chart.configure_axis(titleFontWeight=500,titleFontSize=11,titleFont='Helvetica Neue',\\n\"\n default_vis_style_code += \"\\t\\t\\t\\t\\tlabelFontWeight=400,labelFontSize=9,labelFont='Helvetica Neue',labelColor='#505050')\\n\"\n default_vis_style_code += \"chart = chart.configure_legend(titleFontWeight=500,titleFontSize=10,titleFont='Helvetica Neue',\\n\"\n default_vis_style_code += (\n \"\\t\\t\\t\\t\\tlabelFontWeight=400,labelFontSize=9,labelFont='Helvetica Neue')\\n\"\n )\n default_vis_style_code += \"chart = chart.properties(width=160,height=150)\\n\"\n vis_style_code = \"\\n# Custom Style Additions\"\n # TODO: improve parsing such that it splits based on line of logic instead of line of code\n for line in source.split(\"\\n \")[1:-1]:\n if line.strip() not in default_vis_style_code:\n vis_style_code += \"\\n\" + line\n if vis_style_code == \"\\n# Custom Style Additions\":\n vis_style_code = default_vis_style_code\n else:\n vis_style_code = default_vis_style_code + vis_style_code\n vis_style_code = vis_style_code.replace(\"\\n\\t\\t\", \"\\n\")\n vis_style_code = vis_style_code.replace(\"\\n \", \"\\n\")\n lux.config.plotting_style_code = vis_style_code\n chart.code = chart.code.replace(\"\\n\\t\\t\", \"\\n\")\n chart.code = chart.code.replace(\"\\n \", \"\\n\")\n\n var = vis._source\n if var is not None:\n all_vars = []\n for f_info in inspect.getouterframes(inspect.currentframe()):\n local_vars = f_info.frame.f_back\n if local_vars:\n callers_local_vars = local_vars.f_locals.items()\n possible_vars = [\n var_name for var_name, var_val in callers_local_vars if var_val is var\n ]\n all_vars.extend(possible_vars)\n found_variable = [\n possible_var for possible_var in all_vars if possible_var[0] != \"_\"\n ][0]\n else: # if vis._source was not set when the Vis was created\n found_variable = \"df\"\n if standalone:\n chart.code = chart.code.replace(\n \"placeholder_variable\",\n f\"pd.DataFrame({str(vis.data.to_dict())})\",\n )\n else:\n # TODO: Placeholder (need to read dynamically via locals())\n chart.code = chart.code.replace(\"placeholder_variable\", found_variable)\n return chart.code\n"
] | [
[
"pandas.api.types.is_period_dtype",
"pandas.PeriodIndex",
"pandas.api.types.is_interval_dtype"
]
] |
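`AltairRenderer.create_vis` in the lux file above converts `Period` columns back to timestamps and casts `Interval` columns to strings before handing the frame to Altair, since neither dtype is Vega-Lite-serializable. A minimal reproduction of those two conversions, using the `isinstance`-based dtype checks that newer pandas recommends over the deprecated `pd.api.types.is_period_dtype`/`is_interval_dtype` (the helper name is hypothetical):

```python
import pandas as pd


def altair_friendly(df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy whose Period/Interval columns are Altair-serializable."""
    out = df.copy()
    for col in out.columns:
        if isinstance(out[col].dtype, pd.PeriodDtype):
            # Periods have no JSON encoding; anchor each at its start timestamp.
            out[col] = pd.PeriodIndex(out[col]).to_timestamp()
        elif isinstance(out[col].dtype, pd.IntervalDtype):
            # Intervals are rendered as plain strings, mirroring the code above.
            out[col] = out[col].astype(str)
    return out


df = pd.DataFrame({"month": pd.period_range("2020-01", periods=3, freq="M"),
                   "bin": pd.Series(pd.interval_range(0, 3))})
print(altair_friendly(df).dtypes)
```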
apdealbao/openpathsampling | [
"26997b43745e197abb91cdb3f51916da0cfc1774"
] | [
"openpathsampling/analysis/shooting_point_analysis.py"
] | [
"import openpathsampling as paths\nimport collections\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n from collections import abc\nexcept ImportError:\n import collections as abc\n\n# based on http://stackoverflow.com/a/3387975\nclass TransformedDict(abc.MutableMapping):\n \"\"\"A dictionary that applies an arbitrary key-altering function before\n accessing the keys\n\n This implementation involves a particular hashing function. It is\n assumed that any two input objects which give the same hash are\n effectively identical, allowing later rehashing based on the same.\n \"\"\"\n\n def __init__(self, hash_function, *args, **kwargs):\n self.store = dict()\n self.hash_representatives = dict()\n self.hash_function = hash_function\n self.update(dict(*args, **kwargs)) # use the free update to set keys\n\n def __getitem__(self, key):\n return self.store[self.hash_function(key)]\n\n def __setitem__(self, key, value):\n hashed = self.hash_function(key)\n if hashed not in self.hash_representatives:\n self.hash_representatives[hashed] = key\n self.store[hashed] = value\n\n def __delitem__(self, key):\n hashed = self.hash_function(key)\n del self.store[hashed]\n del self.hash_representatives[hashed]\n\n def __iter__(self):\n return iter(self.hash_representatives.values())\n\n def __len__(self):\n return len(self.store)\n\n def rehash(self, new_hash):\n \"\"\"Create a new TransformedDict with this data and new hash.\n\n It is up to the user to ensure that the mapping from the old hash to\n the new is a function (i.e., each entry from the old hash can be\n mapped directly onto the new hash).\n\n For example, this is used to map from a snapshot's coordinates to\n a collective variable based on the coordinates. However, if the\n orignal hash was based on coordinates, but the new hash included\n velocities, the resulting mapping would be invalid. It is up to the\n user to avoid such invalid remappings.\n \"\"\"\n return TransformedDict(new_hash,\n {self.hash_representatives[k]: self.store[k]\n for k in self.store})\n\n\nclass SnapshotByCoordinateDict(TransformedDict):\n \"\"\"TransformedDict that uses snapshot coordinates as keys.\n\n This is primarily used to have a unique key for shooting point analysis\n (e.g., committor analysis).\n \"\"\"\n def __init__(self, *args, **kwargs):\n hash_fcn = lambda x : x.coordinates.tostring()\n super(SnapshotByCoordinateDict, self).__init__(hash_fcn,\n *args, **kwargs)\n\n\nclass ShootingPointAnalysis(SnapshotByCoordinateDict):\n \"\"\"\n Container and methods for shooting point analysis.\n\n This is especially useful for analyzing committors, which is\n automatically done on a per-configuration basis, and can also be done\n as a histogram.\n\n Parameters\n ----------\n steps : iterable of :class:`.MCStep` or None\n input MC steps to analyze; if None, no analysis performed\n states : list of :class:`.Volume`\n volumes to consider as states for the analysis. For pandas output,\n these volumes must be named.\n \"\"\"\n def __init__(self, steps, states):\n super(ShootingPointAnalysis, self).__init__()\n self.states = states\n if steps is not None:\n self.analyze(steps)\n\n def analyze(self, steps):\n \"\"\"Analyze a list of steps, adding to internal results.\n\n Parameters\n ----------\n steps : iterable of :class:`.MCStep` or None\n MC steps to analyze\n \"\"\"\n for step in steps:\n total = self.analyze_single_step(step)\n\n def analyze_single_step(self, step):\n \"\"\"\n Analyzes final states from a path sampling step. 
Adds to internal\n results.\n\n Parameters\n ----------\n step : :class:`.MCStep`\n the step to analyze and add to this analysis\n\n Returns\n -------\n list of :class:`.Volume`\n the states which are identified as new final states from this\n move\n \"\"\"\n key = self.step_key(step)\n if key is not None:\n details = step.change.canonical.details\n trial_traj = step.change.canonical.trials[0].trajectory\n init_traj = details.initial_trajectory\n test_points = [s for s in [trial_traj[0], trial_traj[-1]]\n if s not in [init_traj[0], init_traj[-1]]]\n\n total = collections.Counter(\n {state: sum([int(state(pt)) for pt in test_points])\n for state in self.states}\n )\n total_count = sum(total.values())\n # TODO: clarify assertion (at least one endpoint in state)\n assert total_count == 1 or total_count == 2\n try:\n self[key] += total\n except KeyError:\n self[key] = total\n else:\n total = {}\n\n return [s for s in total.keys() if total[s] > 0]\n\n @staticmethod\n def step_key(step):\n \"\"\"\n Returns the key we use for hashing (the shooting snapshot).\n\n Parameters\n ----------\n step : :class:`.MCStep`\n the step to extract a shooting point from\n\n Returns\n -------\n :class:`.Snapshot` or None\n the shooting snapshot, or None if this step is not a shooting\n move.\n \"\"\"\n key = None\n try:\n details = step.change.canonical.details\n shooting_snap = details.shooting_snapshot\n except AttributeError:\n # wrong kind of move (no shooting_snapshot)\n pass\n except IndexError:\n # very wrong kind of move (no trials!)\n pass\n else:\n # easy to change how we define the key\n key = shooting_snap\n return key\n\n @classmethod\n def from_individual_runs(cls, run_results, states=None):\n \"\"\"Build shooting point analysis from pairs of shooting point to\n final state.\n\n Parameters\n ----------\n run_results : list of 2-tuples (:class:`.Snapshot`, :class:`.Volume`)\n the first element in each pair is the shooting point, the second\n is the final volume\n \"\"\"\n if states is None:\n states = set(s[1] for s in run_results)\n analyzer = ShootingPointAnalysis(None, states)\n for step in run_results:\n key = step[0]\n total = collections.Counter({step[1] : 1})\n try:\n analyzer[key] += total\n except KeyError:\n analyzer[key] = total\n\n return analyzer\n\n def committor(self, state, label_function=None):\n \"\"\"Calculate the (point-by-point) committor.\n\n This is for the point-by-point (per-configuration) committor, not\n for histograms. 
See `committor_histogram` for the histogram version.\n\n        Parameters\n        ----------\n        state : :class:`.Volume`\n            the committor is 1.0 if 100% of shots enter this state\n        label_function : callable\n            the keys for the dictionary that is returned are\n            `label_function(snapshot)`; default `None` gives the snapshot as\n            key.\n\n        Returns\n        -------\n        dict :\n            mapping labels given by label_function to the committor value\n        \"\"\"\n        if label_function is None:\n            label_function = lambda s: s\n        results = {}\n        for k in self:\n            out_key = label_function(k)\n            counter_k = self[k]\n            committor = float(counter_k[state]) / sum([counter_k[s] for s in self.states])\n            results[out_key] = committor\n        return results\n\n    @staticmethod\n    def _get_key_dim(key):\n        try:\n            ndim = len(key)\n        except TypeError:\n            ndim = 1\n        if ndim > 2 or ndim < 1:\n            raise RuntimeError(\"Histogram key dimension {0} > 2 or {0} < 1 \"\n                               \"(key: {1})\".format(ndim, key))\n        return ndim\n\n    def committor_histogram(self, new_hash, state, bins=10):\n        \"\"\"Calculate the histogrammed version of the committor.\n\n        Parameters\n        ----------\n        new_hash : callable\n            values are histogrammed in bins based on new_hash(snapshot)\n        state : :class:`.Volume`\n            the committor is 1.0 if 100% of shots enter this state\n        bins : see numpy.histogram\n            bins input to numpy.histogram\n\n        Returns\n        -------\n        tuple :\n            hist, bins like numpy.histogram, where hist is the histogram\n            count and bins is the bins output from numpy.histogram. 2-tuple\n            in the case of 1D histogram, 3-tuple in the case of 2D histogram\n        \"\"\"\n        rehashed = self.rehash(new_hash)\n        r_store = rehashed.store\n        count_all = {k: sum(r_store[k].values()) for k in r_store}\n        count_state = {k: r_store[k][state] for k in r_store}\n        ndim = self._get_key_dim(list(r_store.keys())[0])\n        if ndim == 1:\n            (all_hist, b) = np.histogram(list(count_all.keys()),\n                                         weights=list(count_all.values()),\n                                         bins=bins)\n            (state_hist, b) = np.histogram(list(count_state.keys()),\n                                           weights=list(count_state.values()),\n                                           bins=bins)\n            b_list = [b]\n        elif ndim == 2:\n            (all_hist, b_x, b_y) = np.histogram2d(\n                x=[k[0] for k in count_all],\n                y=[k[1] for k in count_all],\n                weights=list(count_all.values()),\n                bins=bins\n            )\n            (state_hist, b_x, b_y) = np.histogram2d(\n                x=[k[0] for k in count_state],\n                y=[k[1] for k in count_state],\n                weights=list(count_state.values()),\n                bins=bins\n            )\n            b_list = [b_x, b_y]\n        # if all_hist is 0, state_frac is NaN: ignore warning, return NaN\n        with np.errstate(divide='ignore', invalid='ignore'):\n            state_frac = np.true_divide(state_hist, all_hist)\n        return tuple([state_frac] + b_list)\n\n    def to_pandas(self, label_function=None):\n        \"\"\"\n        Pandas dataframe. Row for each configuration, column for each state.\n\n        Parameters\n        ----------\n        label_function : callable\n            takes snapshot, returns index to use for pandas.DataFrame\n        \"\"\"\n        transposed = pd.DataFrame(self.store).transpose().to_dict()\n        df = pd.DataFrame(transposed)\n        df.columns = [s.name for s in transposed.keys()]\n        if label_function is None:\n            df.index = range(len(df.index))\n        else:\n            # TODO: is ordering guaranteed here?\n            df.index = [label_function(self.hash_representatives[k])\n                        for k in self.store]\n        return df\n"
] | [
[
"pandas.DataFrame",
"numpy.true_divide",
"numpy.errstate"
]
] |
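The per-configuration committor computed by `ShootingPointAnalysis.committor` above reduces to a ratio of per-state counts. A minimal standalone sketch of that computation; the snapshot labels, state names, and counts below are invented for illustration and are not from the package:

```python
import collections

# Invented shooting-point data: each key stands in for a snapshot, each
# value counts how many trial endpoints landed in each state.
data = {
    "snap_a": collections.Counter({"A": 3, "B": 1}),
    "snap_b": collections.Counter({"A": 1, "B": 3}),
}
states = ["A", "B"]

# Point-by-point committor toward state "B", mirroring the loop in
# ShootingPointAnalysis.committor.
committor_B = {
    k: counts["B"] / sum(counts[s] for s in states)
    for k, counts in data.items()
}
print(committor_B)  # {'snap_a': 0.25, 'snap_b': 0.75}
```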
NishthaShukla/ga-learner-dsmp-repo | [
"a6be62c4a37619a7a900eec8e7b81b33e29a655c"
] | [
"Car-insurance-claim/code.py"
] | [
"# --------------\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n# Code starts here\ndf = pd.read_csv(path)\n\nprint(df.head())\n\nprint(df.info())\n\ncolumns = ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']\n\nfor col in columns: \n df[col].replace({'\\$': '', ',': ''}, regex=True,inplace=True)\n\n\n\nprint(df.head())\n\nX = df.drop(columns=['CLAIM_FLAG'])\n\ny = df['CLAIM_FLAG']\n\ncount = y.value_counts()\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 6) \n# Code ends here\n\n\n# --------------\n# Code starts here\n\nX_train[['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']] = X_train[['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']].astype(float)\n\nX_test[['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']] = X_test[['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']].astype(float)\n\nprint(X_train.isnull().sum())\n\nprint(X_test.isnull().sum())\n# Code ends here\n\n\n# --------------\n# Code starts here\n\nX_train.dropna(subset=['YOJ','OCCUPATION'],inplace = True)\n\nX_test.dropna(subset=['YOJ','OCCUPATION'],inplace=True)\n\nprint(X_train.isnull().sum())\n\ny_train = y_train.reindex(y_train[X_train.index])\n\ny_test = y_test.reindex(y_test[X_test.index])\n\nX_train['AGE'].fillna((X_train['AGE'].mean()), inplace=True)\n\nX_train['CAR_AGE'].fillna((X_train['CAR_AGE']),inplace=True)\n\nX_train['INCOME'].fillna((X_train['INCOME']),inplace=True)\n\nX_train['HOME_VAL'].fillna((X_train['HOME_VAL']),inplace=True)\n\nX_test['AGE'].fillna((X_test['AGE'].mean()), inplace=True)\n\nX_test['CAR_AGE'].fillna((X_test['CAR_AGE']),inplace=True)\n\nX_test['INCOME'].fillna((X_test['INCOME']),inplace=True)\n\nX_test['HOME_VAL'].fillna((X_test['HOME_VAL']),inplace=True)\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import LabelEncoder\ncolumns = [\"PARENT1\",\"MSTATUS\",\"GENDER\",\"EDUCATION\",\"OCCUPATION\",\"CAR_USE\",\"CAR_TYPE\",\"RED_CAR\",\"REVOKED\"]\n\n# Code starts here\nfor col in columns:\n le = LabelEncoder()\n X_train[col] = le.fit_transform(X_train[col].astype(str))\n X_test[col] = le.transform(X_test[col].astype(str))\n\n# Code ends here\n\n\n\n# --------------\nfrom sklearn.metrics import precision_score \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\n\n\n# code starts here \n\nmodel = LogisticRegression(random_state=6)\n\nmodel.fit(X_train,y_train)\n\ny_pred = model.predict(X_test)\n\nscore = accuracy_score(y_test,y_pred)\n\nprint(score)\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import StandardScaler\nfrom imblearn.over_sampling import SMOTE\n\n# code starts here\nsmote = SMOTE(random_state=9)\n\nX_train,y_train = smote.fit_sample(X_train,y_train)\n\nscaler = StandardScaler()\n\nX_train = scaler.fit_transform(X_train)\n\nX_test = scaler.transform(X_test)\n\n# Code ends here\n\n\n# --------------\n# Code Starts here\nmodel = LogisticRegression()\n\nmodel.fit(X_train,y_train)\n\ny_pred = model.predict(X_test)\n\nscore = accuracy_score(y_test,y_pred)\n# Code ends here\n\n\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
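The currency cleanup at the top of the solution above hinges on a single regex `replace` before the float cast. A self-contained sketch of the same pattern; the column values are invented:

```python
import pandas as pd

# Invented stand-in for one of the insurance currency columns.
df = pd.DataFrame({"INCOME": ["$67,349", "$91,449", None]})

# Strip '$' and ',' exactly as in the solution, then cast to float;
# missing entries pass through as NaN.
df["INCOME"] = df["INCOME"].replace({r"\$": "", ",": ""}, regex=True)
df["INCOME"] = df["INCOME"].astype(float)
print(df["INCOME"].tolist())  # [67349.0, 91449.0, nan]
```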
samanfrm/gbnet | [
"de737602d88f8ad77b50803f0f9a50296426fcd6"
] | [
"gbnet/aux.py"
] | [
"import os\nimport psutil\nfrom datetime import timedelta\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom num2words import num2words\n\ndef genData(NX=3, num_active_tfs=2, NY=50, AvgNTF=0.5):\n # Start generating simulated data\n # The TFs: Number of TF and activation state of each\n\n # the target genes: Number of target genes and state of each\n \n Xgt = dict(zip(list(range(NX)), [0]*NX))\n Y = dict(zip(list(range(NX,NX+NY)), [0]*NY))\n\n # generate a random set of interations between TFs and genes\n edges = {}\n for trg in Y.keys():\n \n # randomize the number of TFs for this gene\n num_edges = round (np.random.gamma(3, AvgNTF/3))\n num_edges = min(num_edges, len(Xgt))\n\n # pick random TFs\n for src in np.random.choice(list(Xgt.keys()), size=num_edges, replace=False):\n # here an edge can be upregulator (1), downregulator (-1) or not valid (0)\n edges[(src, trg)] = np.random.choice([-1, 0, 1], p=[0.05, 0.9, 0.05])\n\n # randomize current activation state for TFs\n # and then determine the state of targeted genes\n for src in np.random.choice(list(Xgt.keys()), size=num_active_tfs, replace=False):\n # either active (1) or inactive (0)\n Xgt[src] = 1\n\n for src in Xgt.keys():\n for trg in Y.keys():\n edge = src, trg\n if edge in edges.keys():\n if edges[edge] < 0 and Xgt[src] > 0:\n # if TF is inhibitor, make sure the response is definitive\n Y[trg] += edges[edge]*Xgt[src]*10000\n else:\n Y[trg] += edges[edge]*Xgt[src]\n\n a = 0.005\n b = 0.0005\n\n # get only the signs for gene activation states\n for trg in Y.keys():\n sgn = np.sign(Y[trg])\n if sgn == -1:\n Y[trg] = np.random.choice([-1, 0, 1], p=[1.-a-b, a, b])\n if sgn == 0:\n Y[trg] = np.random.choice([-1, 0, 1], p=[a, 1.-a-a, a])\n if sgn == 1:\n Y[trg] = np.random.choice([-1, 0, 1], p=[b, a, 1.-a-b])\n \n # generate more random non-applicable edges\n # for src in Xgt.keys():\n \n # # randomize the number of target genes for this TF\n # num_edges = int(np.random.exponential(len(Y)/5))\n # num_edges = min(num_edges, len(Y))\n\n # # pick random genes\n # for trg in np.random.choice(list(Y.keys()), size=num_edges, replace=False):\n # try:\n # edges[(src, trg)] = edges[(src, trg)]\n # except KeyError:\n # edges[(src, trg)] = 0\n \n \n # this is the possible associations data\n rels = pd.DataFrame(list(edges.keys()), columns=['srcuid', 'trguid'])\n rels = rels.assign(val=list(edges.values()))\n\n # extract a dataframe that only contains relevant interactions\n rels = rels[rels['trguid'].isin(Y.keys())]\n rels = rels.assign(edge=[(src, trg) for src, trg in zip(rels['srcuid'], rels['trguid'])])\n rels = rels.assign(srcactive=[Xgt[src] for src in rels['srcuid']])\n rels = rels.set_index('edge')\n rels['type'] = 'conflict'\n\n ents = pd.DataFrame([num2words(i) for i in range(NX+NY)], columns=['name'])\n ents.index.name = 'uid'\n\n return Xgt, ents, rels, Y\n\ndef processTrace(model, Xgt=None, rels=None):\n \n Dx, ADx, Ds, ADs, Dy = model.dictionaries\n del Dy\n\n Xres = model.get_result('X')\n Xres = Xres.assign(gelman_rubin=model.gelman_rubin['X'])\n Xres = Xres.assign(srcuid=[ADx[i] for i in range(len(Dx))])\n Xres = Xres.set_index('srcuid')\n if Xgt is not None:\n Xres = Xres.assign(ground_truth=[Xgt[src] for src in Xres.index])\n\n Rres = model.get_result('R')\n Rres = Rres.assign(gelman_rubin=model.gelman_rubin['R'])\n Rres = Rres.assign(edge=[ADs[i] for i in range(len(Ds))])\n Rres = Rres.set_index('edge')\n srcuid, trguid = zip(*Rres.index.tolist())\n Rres = Rres.assign(srcuid=srcuid)\n Rres = 
Rres.assign(trguid=trguid)\n Rres = Rres[Rres['srcuid'].isin(Xres.index)]\n if rels is not None:\n Rres = Rres.assign(ground_truth=rels.loc[Rres.index, 'val'].abs())\n\n Sres = model.get_result('S')\n Sres = Sres.assign(gelman_rubin=model.gelman_rubin['S'])\n Sres = Sres.assign(edge=[ADs[i] for i in range(len(Ds))])\n Sres = Sres.set_index('edge')\n srcuid, trguid = zip(*Sres.index.tolist())\n Sres = Sres.assign(srcuid=srcuid)\n Sres = Sres.assign(trguid=trguid)\n Sres = Sres[Sres['srcuid'].isin(Xres.index)]\n if rels is not None:\n Sres = Sres.assign(ground_truth=rels.loc[Sres.index, 'val'])\n\n Xres = Xres.sort_values(by=['mean'])\n Rres = Rres.sort_values(by=['mean'])\n Sres = Sres.sort_values(by=['mean'])\n \n return Xres, Rres, Sres\n \n\ndef updateRes(Xres, Rres, Sres, lenient=[False, False, False], final=False):\n \n lenientX, lenientR, lenientS = lenient\n\n if final:\n conditions = [Xres['mean'] < 0.45, Xres['mean'] >= 0.7]\n elif lenientX:\n conditions = [Xres['mean'] < 0.4, Xres['mean'] >= 0.8]\n else:\n conditions = [Xres['mean'] < 0.2, Xres['mean'] >= 0.8]\n choices = [0, 1]\n Xres = Xres.assign(pred=np.select(conditions, choices, default=-99))\n try:\n Xres = Xres.assign(correctness=Xres['ground_truth']==Xres['pred'])\n except KeyError:\n pass\n\n if final:\n conditions = [Rres['mean'] < 0.40, Rres['mean'] >= 0.50]\n elif lenientR:\n conditions = [Rres['mean'] < 0.35, Rres['mean'] >= 0.65]\n else:\n conditions = [Rres['mean'] < 0.30, Rres['mean'] >= 0.70]\n choices = [0, 1]\n Rres = Rres.assign(pred=np.select(conditions, choices, default=-99))\n try:\n Rres = Rres.assign(correctness=Rres['ground_truth']==Rres['pred'])\n except KeyError:\n pass\n Rres = Rres.assign(srcactive=Xres.loc[Rres.srcuid, 'pred'].tolist())\n Rres = Rres[Rres['srcactive']>0]\n\n if final:\n conditions = [Sres['mean'] < 0.5, Sres['mean'] >= 0.5]\n elif lenientS:\n conditions = [Sres['mean'] < 0.35, Sres['mean'] >= 0.65]\n else:\n conditions = [Sres['mean'] < 0.30, Sres['mean'] >= 0.70]\n choices = [-1, 1]\n Sres = Sres.assign(prediction=np.select(conditions, choices, default=0))\n choices = [0, 1]\n Sres = Sres.assign(pred=np.select(conditions, choices, default=-99))\n try:\n Sres = Sres.assign(correctness=Sres['ground_truth']==Sres['prediction'])\n except KeyError:\n pass\n Sres = Sres.assign(srcactive=Xres.loc[Sres.srcuid, 'pred'].tolist())\n Sres = Sres[Sres['srcactive']>0]\n Sres = Sres[Sres.index.isin(Rres.index)]\n\n if len(Sres) > 0:\n # I think this might break. 
Be careful when rows differ in both dfs\n Sres['applicable'] = Rres['pred']\n Sres = Sres[Sres['applicable']>0]\n\n if not final:\n Xres = Xres[Xres['pred']!=-99]\n Rres = Rres[Rres['pred']!=-99]\n Sres = Sres[Sres['prediction']!=0]\n \n return Xres, Rres, Sres\n\n\nclass Reporter(object):\n def __init__(self):\n self.ctime = time.time()\n self.stime = self.ctime\n self.ltime = self.ctime\n self.process = psutil.Process(os.getpid())\n self.last_report = \"\"\n\n def reset(self):\n self.ctime = time.time()\n self.stime = self.ctime\n\n def report(self, string='', schar='', lchar='\\n', showram=True, showlast=True):\n self.ctime = time.time()\n total_dt = timedelta(seconds=round(self.ctime - self.stime))\n last_dt = timedelta(seconds=round(self.ctime - self.ltime))\n out = f'{schar}{string}'\n\n if string:\n out += ' -- '\n\n if showlast:\n out += f\"Last: {last_dt}, \"\n\n out += f\"Elapsed: {total_dt}\"\n \n if showram:\n usage = 0\n usage += self.process.memory_info().rss-self.process.memory_info().shared\n for child in self.process.children():\n usage += child.memory_info().rss-child.memory_info().shared\n usage = usage/1073741824\n out += f\", Mem usage: {usage:0.02f}GB\"\n \n print(out, end=lchar)\n self.last_report = out\n if showlast:\n self.ltime = self.ctime\n\n\ndef mutate_data(Y, factor):\n\n a = (1 - factor)/2\n b = a + 0.5\n\n origY = Y.copy()\n Y = origY.copy()\n\n for key, val in Y.items():\n Y[key] = int(round(val*np.random.uniform(a, b))) \n\n ndiff = 0\n for key in origY.keys():\n ndiff += int(Y[key] != origY[key])\n print(f\"{ndiff} entries have been mutated\")\n\n return Y\n\n\ndef mutate_data2(Y, factor):\n\n a = (1 - factor)/2\n b = a + 0.5\n\n origY = Y.copy()\n Y = origY.copy()\n\n for key, val in Y.items():\n if val == 0 and np.random.uniform() < factor:\n Y[key] = int(round(np.random.choice([-1,1])))\n else:\n Y[key] = int(round(val*np.random.uniform(a, b)))\n\n ndiff = 0\n for key in origY.keys():\n ndiff += int(Y[key] != origY[key])\n print(f\"{ndiff} entries have been mutated\")\n\n return Y\n"
] | [
[
"numpy.random.choice",
"numpy.random.gamma",
"numpy.sign",
"numpy.random.uniform",
"numpy.select"
]
] |
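The decision step in `updateRes` above relies on `numpy.select` with a fall-through default for undecided nodes. A tiny sketch of the same pattern, with the thresholds copied from the non-lenient branch and the posterior means invented:

```python
import numpy as np
import pandas as pd

# Posterior means for three hypothetical nodes.
means = pd.Series([0.10, 0.55, 0.92])

# Same three-way pattern as updateRes: decide 0 or 1, and leave -99
# when the mean falls between the thresholds.
conditions = [means < 0.2, means >= 0.8]
choices = [0, 1]
pred = np.select(conditions, choices, default=-99)
print(pred)  # [  0 -99   1]
```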
balde-soul/TGS | [
"e2156ecd57f88c79dee5361c7c80095a1ee2fec5"
] | [
"model_base.py"
] | [
"# coding=utf-8\nimport tensorflow as tf\nfrom colorama import Fore\nimport numpy as np\nimport logging\nfrom collections import OrderedDict\nimport Putil.DenseNet.model_base as dmb\nfrom tensorflow.contrib import layers\nimport Putil.np.util as npu\nimport Putil.tf.util as tfu\n\n\ndef get_image_summary(img, idx=0):\n \"\"\"\n Make an image summary for 4d tensor image with index idx\n \"\"\"\n\n V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))\n V -= tf.reduce_min(V)\n V /= tf.reduce_max(V)\n V *= 255\n\n img_w = tf.shape(img)[1]\n img_h = tf.shape(img)[2]\n V = tf.reshape(V, tf.stack((img_w, img_h, 1)))\n V = tf.transpose(V, (2, 0, 1))\n V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))\n return V\n\n\ndef weight_variable(shape, stddev=0.1, name=\"weight\"):\n initial = tf.truncated_normal(shape, stddev=stddev)\n return tf.Variable(initial, name=name)\n\n\ndef weight_variable_devonc(shape, stddev=0.1, name=\"weight_devonc\"):\n return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)\n\n\ndef bias_variable(shape, name=\"bias\"):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=name)\n\n\ndef conv2d(x, W, b, keep_prob_):\n with tf.name_scope(\"conv2d\"):\n conv_2d = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')\n conv_2d_b = tf.nn.bias_add(conv_2d, b)\n return tf.nn.dropout(conv_2d_b, keep_prob_)\n\n\ndef deconv2d(x, W,stride):\n with tf.name_scope(\"deconv2d\"):\n x_shape = tf.shape(x)\n output_shape = tf.stack([x_shape[0], x_shape[1]*2, x_shape[2]*2, x_shape[3]//2])\n return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='VALID', name=\"conv2d_transpose\")\n\n\ndef max_pool(x,n):\n return tf.nn.max_pool(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID')\n\n\ndef crop_and_concat(x1, x2):\n with tf.name_scope(\"crop_and_concat\"):\n x1_shape = tf.shape(x1)\n x2_shape = tf.shape(x2)\n # offsets for the top left corner of the crop\n offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]\n size = [-1, x2_shape[1], x2_shape[2], -1]\n x1_crop = tf.slice(x1, offsets, size)\n return tf.concat([x1_crop, x2], 3)\n\n\ndef pixel_wise_softmax(output_map):\n with tf.name_scope(\"pixel_wise_softmax\"):\n # subtract max is work for avoid overflow\n max_axis = tf.reduce_max(output_map, axis=3, keepdims=True, name='calc_max')\n exponential_map = tf.exp(tf.subtract(output_map, max_axis, 'sub_for_avoid_overflow'), 'exp')\n normalize = tf.reduce_sum(exponential_map, axis=3, keepdims=True, name='exp_sum')\n return tf.div(exponential_map, normalize, name='normalize')\n\n\ndef cross_entropy(y_, output_map):\n return -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0)), name=\"cross_entropy\")\n\n\ndef create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,\n summaries=True):\n \"\"\"\n Creates a new convolutional unet for the given parametrization.\n :param x: input tensor, shape [?,nx,ny,channels]\n :param keep_prob: dropout probability tensor\n :param channels: number of channels in the input image\n :param n_class: number of output labels\n :param layers: number of layers in the net\n :param features_root: number of features in the first layer\n :param filter_size: size of the convolution filter\n :param pool_size: size of the max pooling operation\n :param summaries: Flag if summaries should be created\n \"\"\"\n\n logging.info(\n Fore.GREEN + \"Layers {layers}, features {features}, filter size 
{filter_size}x{filter_size}, pool size: \"\n \"{pool_size}x{pool_size}\".format(\n layers=layers,\n features=features_root,\n filter_size=filter_size,\n pool_size=pool_size))\n\n # Placeholder for the input image\n with tf.name_scope(\"preprocessing\"):\n nx = tf.shape(x)[1]\n ny = tf.shape(x)[2]\n x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))\n in_node = x_image\n batch_size = tf.shape(x_image)[0]\n\n weights = []\n biases = []\n convs = []\n pools = OrderedDict()\n deconv = OrderedDict()\n dw_h_convs = OrderedDict()\n up_h_convs = OrderedDict()\n\n in_size = 1000\n size = in_size\n # down layers\n for layer in range(0, layers):\n with tf.name_scope(\"down_conv_{}\".format(str(layer))):\n features = 2 ** layer * features_root\n stddev = np.sqrt(2 / (filter_size ** 2 * features))\n if layer == 0:\n w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name=\"w1\")\n else:\n w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name=\"w1\")\n\n w2 = weight_variable([filter_size, filter_size, features, features], stddev, name=\"w2\")\n b1 = bias_variable([features], name=\"b1\")\n b2 = bias_variable([features], name=\"b2\")\n\n conv1 = conv2d(in_node, w1, b1, keep_prob)\n tmp_h_conv = tf.nn.relu(conv1)\n conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)\n dw_h_convs[layer] = tf.nn.relu(conv2)\n\n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n\n size -= 4\n if layer < layers - 1:\n pools[layer] = max_pool(dw_h_convs[layer], pool_size)\n in_node = pools[layer]\n size /= 2\n\n in_node = dw_h_convs[layers - 1]\n\n # up layers\n for layer in range(layers - 2, -1, -1):\n with tf.name_scope(\"up_conv_{}\".format(str(layer))):\n features = 2 ** (layer + 1) * features_root\n stddev = np.sqrt(2 / (filter_size ** 2 * features))\n\n wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name=\"wd\")\n bd = bias_variable([features // 2], name=\"bd\")\n h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)\n h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)\n deconv[layer] = h_deconv_concat\n\n w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name=\"w1\")\n w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name=\"w2\")\n b1 = bias_variable([features // 2], name=\"b1\")\n b2 = bias_variable([features // 2], name=\"b2\")\n\n conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)\n h_conv = tf.nn.relu(conv1)\n conv2 = conv2d(h_conv, w2, b2, keep_prob)\n in_node = tf.nn.relu(conv2)\n up_h_convs[layer] = in_node\n\n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n\n size *= 2\n size -= 4\n\n # Output Map\n with tf.name_scope(\"output_map\"):\n weight = weight_variable([1, 1, features_root, n_class], stddev)\n bias = bias_variable([n_class], name=\"bias\")\n conv = conv2d(in_node, weight, bias, tf.constant(1.0))\n output_map = tf.nn.relu(conv)\n up_h_convs[\"out\"] = output_map\n\n if summaries:\n with tf.name_scope(\"summaries\"):\n for i, (c1, c2) in enumerate(convs):\n tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))\n tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))\n\n for k in pools.keys():\n tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))\n\n for k in deconv.keys():\n tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))\n\n for k in dw_h_convs.keys():\n 
tf.summary.histogram(\"dw_convolution_%02d\" % k + '/activations', dw_h_convs[k])\n\n for k in up_h_convs.keys():\n tf.summary.histogram(\"up_convolution_%s\" % k + '/activations', up_h_convs[k])\n\n variables = []\n for w1, w2 in weights:\n variables.append(w1)\n variables.append(w2)\n\n for b1, b2 in biases:\n variables.append(b1)\n variables.append(b2)\n\n return output_map, variables, int(in_size - size)\n\n\ndef __reducor_for_DenseUNet(\n output_map,\n training,\n params\n):\n param_dtype = tfu.tf_type(params.get('param_dtype')).Type\n regularize_weight = params.get('regularize_weight')\n grow = params.get('grows')\n kernel = params.get('kernels')\n layer_param = params.get('layer_param')\n layer_param['training'] = training\n output_map = dmb.DenseNetBlockLayers(\n output_map,\n param_dtype,\n grow,\n 'reducor',\n regularize_weight,\n kernel,\n layer_param\n )\n return output_map\n pass\n\n\ndef __base_feature(\n output_map,\n params\n):\n filter = params.get('feature_amount')\n kernel = params.get('kernel')\n stride = params.get('stride')\n param_dtype = tfu.tf_type(params.get('param_dtype')).Type\n regularize_weight = params.get('regularize_weight')\n\n output_map = tf.layers.conv2d(\n output_map,\n filter,\n kernel,\n stride,\n \"same\",\n activation=tf.nn.relu,\n kernel_initializer=tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype),\n kernel_regularizer=layers.l2_regularizer(regularize_weight),\n bias_initializer=tf.zeros_initializer(dtype=param_dtype),\n bias_regularizer=layers.l2_regularizer(regularize_weight),\n name='base'\n )\n return output_map\n pass\n\n\ndef __DenseUNet(\n output_map,\n training,\n DenseUNetConfig\n):\n \"\"\"\n\n :param output_map:\n :param training:\n :param DenseUNetConfig:\n# {\n# \"BaseModel\": \"DenseNet\",\n# \"BaseFeature\":{\n# \"feature_amount\": 32,\n# \"kernel\": [3, 3],\n# \"stride\": [1, 1],\n# \"param_dtype\": 0.32,\n# \"regularize_weight\": 0.0001\n# },\n# \"DenseNet\":[\n# {\n# \"param_dtype\": 0.32,\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n# \"pool_kernel\": [2, 2],\n# \"pool_stride\": [2, 2],\n# \"pool_type\": \"max\",\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# }\n# },\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\": {\n# \"type\": \"ReLU\"\n# },\n# \"compress_rate\": null,\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n# \"pool_kernel\": [2, 2],\n# \"pool_stride\": [2, 2],\n# \"pool_type\": \"max\",\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# }\n# },\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\": {\n# \"type\": \"ReLU\"\n# },\n# \"compress_rate\": null,\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n# \"pool_kernel\": [2, 2],\n# \"pool_stride\": [2, 2],\n# \"pool_type\": \"max\",\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# }\n# },\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\": {\n# \"type\": \"ReLU\"\n# },\n# \"compress_rate\": null,\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 
3], [3, 3], [3, 3]],\n# \"pool_kernel\": [2, 2],\n# \"pool_stride\": [2, 2],\n# \"pool_type\": \"max\",\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# }\n# },\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\": {\n# \"type\": \"ReLU\"\n# },\n# \"compress_rate\": null,\n# \"dropout_rate\": 0.1\n# }\n# }\n# ],\n# \"DeDenseNet\":[\n# {\n# \"param_dtype\": 0.32,\n#\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n#\n# \"t_kernel\": [3, 3],\n# \"t_stride\": [2, 2],\n# \"compress_rate\": 0.3,\n#\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate\":{\n# \"type\": \"ReLU\"\n# }\n# },\n#\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# },\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n#\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n#\n# \"t_kernel\": [3, 3],\n# \"t_stride\": [2, 2],\n# \"compress_rate\": 0.3,\n#\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate\":{\n# \"type\": \"ReLU\"\n# }\n# },\n#\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# },\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n#\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n#\n# \"t_kernel\": [3, 3],\n# \"t_stride\": [2, 2],\n# \"compress_rate\": 0.3,\n#\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate\":{\n# \"type\": \"ReLU\"\n# }\n# },\n#\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# },\n# \"dropout_rate\": 0.1\n# }\n# },\n# {\n# \"param_dtype\": 0.32,\n#\n# \"grows\": [3, 3, 3],\n# \"regularize_weight\": 0.0001,\n# \"kernels\": [[3, 3], [3, 3], [3, 3]],\n#\n# \"t_kernel\": [3, 3],\n# \"t_stride\": [2, 2],\n# \"compress_rate\": 0.3,\n#\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate\":{\n# \"type\": \"ReLU\"\n# }\n# },\n#\n# \"transition_param\":{\n# \"batch_normal\": true,\n# \"activate_param\":{\n# \"type\": \"ReLU\"\n# },\n# \"dropout_rate\": 0.1\n# }\n# }\n# ],\n# \"BlockReducor\":{\n# \"param_dtype\": 0.32,\n# \"regularize_weight\": 0.0001,\n# \"grows\": [3, 2, 1],\n# \"kernels\": [[1, 1], [2, 2], [3, 3]],\n# \"layer_param\":{\n# \"batch_normal\": true,\n# \"activate\":{\n# \"type\": \"ReLU\"\n# }\n# }\n# }\n# }\n\n :return:\n \"\"\"\n BaseFeature = DenseUNetConfig.get('BaseFeature')\n DenseNetConfig = DenseUNetConfig.get('DenseNet')\n DeDenseNetConfig = DenseUNetConfig.get('DeDenseNet')\n BlockReducor = DenseUNetConfig.get('BlockReducor')\n\n output_map = __base_feature(output_map, BaseFeature)\n\n cl = dmb.DenseNetProvide()\n cld = dmb.DeDenseNetProvide()\n output_map = dmb.DenseNetFromParamDict(\n output_map,\n training,\n DenseNetConfig,\n dense_net_provide=cl,\n block_name_flag='encode-')\n block_layer_want = cl.BlockLayer[-1][-1]\n cl.BlockLayer.reverse()\n\n output_map = __reducor_for_DenseUNet(output_map, training, BlockReducor)\n\n de_block_name = 0\n for encode_block_layer in zip(cl.BlockLayer, DeDenseNetConfig):\n DeDenseNetBlockConfig = encode_block_layer[1]\n param_dtype = tfu.tf_type(DeDenseNetBlockConfig.get('param_dtype')).Type\n grows = DeDenseNetBlockConfig.get('grows')\n regularize_weight = DeDenseNetBlockConfig.get('regularize_weight')\n kernels = DeDenseNetBlockConfig.get('kernels')\n t_kernel = 
DeDenseNetBlockConfig.get('t_kernel')\n t_stride = DeDenseNetBlockConfig.get('t_stride')\n compress_rate = DeDenseNetBlockConfig.get('compress_rate')\n layer_param = DeDenseNetBlockConfig.get('layer_param')\n layer_param['training'] = training\n transition_param = DeDenseNetBlockConfig.get('transition_param')\n transition_param['training'] = training\n to_concat = encode_block_layer[0][-1]\n cld.push_block()\n output_map = dmb.DeDenseNetBlockTransition(\n output_map,\n param_dtype,\n 'decode_{0}_{1}'.format(de_block_name, 'transition'),\n regularize_weight,\n t_kernel,\n t_stride,\n compress_rate,\n **transition_param\n )\n output_map = tf.concat(\n [to_concat, output_map],\n axis=-1,\n name='decode_{0}_{1}'.format(de_block_name, 'concat'))\n cld.push_transition(output_map)\n output_map = dmb.DeDenseNetBlockLayers(\n output_map,\n param_dtype,\n grows,\n 'decode_{0}_{1}'.format(de_block_name, 'block_layer'),\n regularize_weight,\n kernels,\n layer_param,\n )\n cld.push_block_layer(output_map)\n de_block_name += 1\n pass\n return output_map\n pass\n\n\ndef DenseUNetPro(\n output_map,\n training,\n class_amount,\n param_dtype,\n regularizer_weight,\n DenseUNetConfig,\n):\n output_map = __DenseUNet(\n output_map,\n training,\n DenseUNetConfig\n )\n output_map = __conv_pixel_wise_class_pro(\n output_map,\n class_amount,\n \"fcn\",\n param_dtype,\n regularizer_weight\n )\n # output_map = tf.reduce_max(output_map, axis=-1, keepdims=True, name='pixel_class')\n return output_map\n pass\n\n\n# todo: calc the miou\ndef fcn_calc_miou(\n logit,\n gt\n):\n pass\n\n\ndef __conv_pixel_wise_class_pro(output_map, class_amount, name, param_dtype, regularize_weight, **options):\n with tf.variable_scope('{0}_pixel_wise_class_pro'.format(name)):\n output_map = tf.layers.conv2d(\n output_map,\n filters=class_amount,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype),\n bias_initializer=tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype),\n kernel_regularizer=layers.l2_regularizer(regularize_weight),\n bias_regularizer=layers.l2_regularizer(regularize_weight),\n use_bias=True,\n name='conv'\n )\n with tf.variable_scope('ac'):\n alpha = tf.Variable(0.1, trainable=True)\n output_map = tf.nn.leaky_relu(output_map, alpha, name='PReLU')\n pass\n pass\n return output_map\n pass\n\n\ndef fcn_acc(logits, label):\n with tf.name_scope(\"fcn_acc\"):\n pro = tf.arg_max(logits, -1)\n shape = tf.shape(pro)\n sub_shape = tf.slice(shape, [1], [-1])\n pixel_count = 0.5 * tf.cast((tf.square(tf.reduce_sum(sub_shape)) - tf.reduce_sum(sub_shape * sub_shape)), tf.float32)\n l = tf.arg_max(label, -1)\n no_zeros_count = tf.cast(tf.count_nonzero(pro - l, axis=-1), tf.float32)\n one_batch_sum = 1 - tf.reduce_sum(no_zeros_count, axis=-1) / pixel_count\n acc = tf.reduce_mean(one_batch_sum, axis=0)\n return acc\n pass\n\n\ndef fcn_loss(logits, label, cost_name, param_dtype, **options):\n \"\"\"\n\n :param logits:\n :param label:\n :param cost_name:\n :param param_dtype:\n :param options:\n :return:\n \"\"\"\n class_amount = logits.get_shape().as_list()[-1]\n with tf.name_scope(\"fcn_loss\"):\n # flat_logits = tf.reshape(logits, [-1, class_amount])\n # flat_labels = tf.reshape(label, [-1, class_amount])\n flat_logits = logits\n flat_labels = label\n if cost_name == \"cross_entropy\":\n class_weights = options.pop(\"class_weights\", None)\n\n if class_weights is not None:\n class_weights = tf.constant(np.array(class_weights, 
dtype=npu.np_type(param_dtype).Type))\n\n                # class weights is a 1-D array; here create the total weight map for weighting\n                weight_map = tf.multiply(flat_labels, class_weights)\n                # weight_map = tf.reduce_sum(weight_map, axis=-1)\n\n                # calc the entropy loss across dims[-1]\n                loss_map = tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits,\n                                                                      labels=flat_labels)\n\n                # make weighting (currently disabled; weight_map is left unused)\n                # weighted_loss = tf.multiply(loss_map, weight_map)\n\n                weighted_loss = loss_map\n                # loss = tf.reduce_mean(weighted_loss)\n                loss = tf.reduce_mean(tf.reduce_mean(weighted_loss, axis=0))\n            else:\n                loss = tf.reduce_sum(\n                    tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n                        logits=flat_logits,\n                        labels=flat_labels),\n                        axis=0\n                    ))\n        elif cost_name == \"dice_coefficient\":\n            eps = 1e-5\n            # softmax the logits\n            prediction = pixel_wise_softmax(logits)\n            intersection = tf.reduce_sum(prediction * label)\n            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(label)\n            loss = -(2 * intersection / union)\n\n        else:\n            raise ValueError(\"Unknown cost function: %s\" % cost_name)\n        return loss\n    pass\n"
] | [
[
"tensorflow.reduce_min",
"tensorflow.nn.conv2d",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.nn.leaky_relu",
"tensorflow.subtract",
"tensorflow.summary.histogram",
"tensorflow.Variable",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.sqrt",
"tensorflow.nn.conv2d_transpose",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"tensorflow.nn.dropout",
"tensorflow.variance_scaling_initializer",
"tensorflow.nn.relu",
"tensorflow.truncated_normal",
"tensorflow.count_nonzero",
"tensorflow.name_scope",
"tensorflow.reduce_sum",
"tensorflow.arg_max",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.zeros_initializer",
"tensorflow.multiply",
"tensorflow.reduce_max",
"tensorflow.div",
"tensorflow.slice",
"tensorflow.reduce_mean"
]
] |
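The `pixel_wise_softmax` helper above subtracts the per-pixel max before exponentiating to avoid overflow. A NumPy transcription of the same trick (not the project's TensorFlow code) shows why it matters:

```python
import numpy as np

def pixel_wise_softmax(output_map):
    # Subtracting the channel-wise max keeps exp() in range; the result
    # is unchanged because softmax is shift-invariant.
    max_axis = output_map.max(axis=-1, keepdims=True)
    exp_map = np.exp(output_map - max_axis)
    return exp_map / exp_map.sum(axis=-1, keepdims=True)

logits = np.array([[[1000.0, 1001.0]]])  # naive exp() would overflow here
print(pixel_wise_softmax(logits))        # [[[0.26894142 0.73105858]]]
```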
jmclong/random-fourier-features-pytorch | [
"22ad7db8387c3df397e828e93091dc26ba4c2b9b"
] | [
"rff/dataloader.py"
] | [
"import torch\nimport torchvision\n\nfrom torch import Tensor\nfrom torch.utils.data.dataset import TensorDataset\n\n\ndef rectangular_coordinates(size: tuple) -> Tensor:\n r\"\"\"Creates a tensor of equally spaced coordinates for use with an image or volume\n\n Args:\n size (tuple): shape of the image or volume\n\n Returns:\n Tensor: tensor of shape :math:`(*\\text{size}, \\text{len(size)})`\n \"\"\"\n def linspace_func(nx): return torch.linspace(0.0, 1.0, nx)\n linspaces = map(linspace_func, size)\n coordinates = torch.meshgrid(*linspaces)\n return torch.stack(coordinates, dim=-1)\n\n\ndef to_dataset(path: str) -> TensorDataset:\n image = torchvision.io.read_image(path).float()\n _, H, W = image.shape\n coords = rectangular_coordinates((H, W))\n image = image.permute((1, 2, 0))\n image /= 255.0\n coords = coords.flatten(0, -2)\n image = image.flatten(0, -2)\n return TensorDataset(coords, image)\n"
] | [
[
"torch.meshgrid",
"torch.stack",
"torch.linspace",
"torch.utils.data.dataset.TensorDataset"
]
] |
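Assuming the `rff` package above is installed, `rectangular_coordinates` can be sanity-checked directly: the output shape follows from stacking one linspace per input dimension.

```python
from rff.dataloader import rectangular_coordinates

coords = rectangular_coordinates((3, 3))
print(coords.shape)             # torch.Size([3, 3, 2])
print(coords[0, 0].tolist())    # [0.0, 0.0]  (top-left corner)
print(coords[-1, -1].tolist())  # [1.0, 1.0]  (bottom-right corner)
```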
aslam/concept-to-clinic | [
"b69a6631ad007c5eca5280169c1db96444fd39ff"
] | [
"prediction/src/preprocess/lung_segmentation.py"
] | [
"import logging\nimport math\nimport os\nimport sys\n\nimport cv2\nimport dicom\nimport numpy\nimport scipy\nfrom dicom.errors import InvalidDicomError\nfrom skimage.filters import roberts\nfrom skimage.measure import label, regionprops\nfrom skimage.morphology import disk, binary_erosion, binary_closing\nfrom skimage.segmentation import clear_border\n\n\ndef get_z_range(dicom_path):\n \"\"\"Return the Z range of the images in the DICOM path\n e.g. -379.0, -31.5\n\n Args:\n dicom_path: path that contains DICOM images\n\n Returns:\n minimum and maximum of the Z range\n \"\"\"\n slices = load_patient(dicom_path)\n min_z, max_z = sys.float_info.max, sys.float_info.min\n for slice in slices:\n z = float(slice.ImagePositionPatient[2])\n if z < min_z:\n min_z = z\n if z > max_z:\n max_z = z\n return min_z, max_z\n\n\ndef save_lung_segments(dicom_path, patient_id):\n \"\"\"Write the converted scan images and related lung masks to EXTRACTED_IMAGE_DIR.\n\n Args:\n dicom_path: a path to a DICOM directory\n patient_id: SeriesInstanceUID of the patient\n\n Returns:\n Original patient images (z, x, y),\n Rescaled mask images (z, x, y)\n \"\"\"\n EXTRACTED_IMAGE_DIR = \"data/extracted/\"\n TARGET_VOXEL_MM = 1.00\n target_dir = os.path.join(os.getcwd(), EXTRACTED_IMAGE_DIR, patient_id)\n os.makedirs(target_dir, exist_ok=True)\n\n slices = load_patient(dicom_path)\n\n cos_value = (slices[0].ImageOrientationPatient[0])\n cos_degree = round(math.degrees(math.acos(cos_value)), 2)\n\n original_image = get_pixels_hu(slices)\n invert_order = slices[1].ImagePositionPatient[2] > slices[0].ImagePositionPatient[2]\n\n pixel_spacing = slices[0].PixelSpacing\n pixel_spacing.append(slices[0].SliceThickness)\n image = rescale_patient_images(original_image, pixel_spacing, TARGET_VOXEL_MM)\n if not invert_order:\n image = numpy.flipud(image)\n\n for index, org_img in enumerate(image):\n patient_dir = target_dir\n os.makedirs(patient_dir, exist_ok=True)\n img_path = patient_dir + \"img_\" + str(index).rjust(4, '0') + \"_i.png\"\n # if there exists slope,rotation image with corresponding degree\n if cos_degree > 0.0:\n org_img = cv_flip(org_img, org_img.shape[1], org_img.shape[0], cos_degree)\n img, mask = get_segmented_lungs(org_img.copy())\n org_img = normalize_hu(org_img)\n cv2.imwrite(img_path, org_img * 255)\n cv2.imwrite(img_path.replace(\"_i.png\", \"_m.png\"), mask * 255)\n\n return original_image, image\n\n\ndef load_patient(src_dir):\n slices = []\n for s in os.listdir(src_dir):\n try:\n dicom_slice = dicom.read_file(os.path.join(src_dir, s))\n except InvalidDicomError:\n logging.error(\"{} is no valid DICOM\".format(s))\n else:\n slices.append(dicom_slice)\n slices.sort(key=lambda x: int(x.InstanceNumber))\n try:\n slice_thickness = numpy.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])\n except IndexError as e:\n slice_thickness = numpy.abs(slices[0].SliceLocation - slices[1].SliceLocation)\n\n for s in slices:\n s.SliceThickness = slice_thickness\n return slices\n\n\ndef get_pixels_hu(slices):\n image = numpy.stack([s.pixel_array for s in slices])\n image = image.astype(numpy.int16)\n image[image == -2000] = 0\n for slice_number in range(len(slices)):\n intercept = slices[slice_number].RescaleIntercept\n slope = slices[slice_number].RescaleSlope\n if slope != 1:\n image[slice_number] = slope * image[slice_number].astype(numpy.float64)\n image[slice_number] = image[slice_number].astype(numpy.int16)\n image[slice_number] += numpy.int16(intercept)\n\n return numpy.array(image, 
dtype=numpy.int16)\n\n\ndef normalize_hu(image):\n    MIN_BOUND = -1000.0\n    MAX_BOUND = 400.0\n    image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)\n    image[image > 1] = 1.\n    image[image < 0] = 0.\n    return image\n\n\ndef get_segmented_lungs(im):\n    # Step 1: Convert into a binary image.\n    binary = im < -400\n    # Step 2: Remove the blobs connected to the border of the image.\n    cleared = clear_border(binary)\n    # Step 3: Label the image.\n    label_image = label(cleared)\n    # Step 4: Keep the labels with 2 largest areas.\n    areas = [r.area for r in regionprops(label_image)]\n    areas.sort()\n    if len(areas) > 2:\n        for region in regionprops(label_image):\n            if region.area < areas[-2]:\n                for coordinates in region.coords:\n                    label_image[coordinates[0], coordinates[1]] = 0\n    binary = label_image > 0\n    # Step 5: Erosion operation with a disk of radius 2.\n    # This operation separates the lung nodules attached to the blood vessels.\n    selem = disk(2)\n    binary = binary_erosion(binary, selem)\n    # Step 6: Closure operation with a disk of radius 10.\n    # This operation is to keep nodules attached to the lung wall.\n    selem = disk(10)\n    binary = binary_closing(binary, selem)\n    # Step 7: Fill in the small holes inside the binary mask of lungs.\n    edges = roberts(binary)\n    binary = scipy.ndimage.binary_fill_holes(edges)\n    # Step 8: Superimpose the binary mask on the input image.\n    get_high_vals = binary == 0\n    im[get_high_vals] = -2000\n    return im, binary\n\n\ndef cv_flip(img, cols, rows, degree):\n    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), degree, 1.0)\n    dst = cv2.warpAffine(img, M, (cols, rows))\n    return dst\n\n\ndef rescale_patient_images(images_zyx, org_spacing_xyz, target_voxel_mm, is_mask_image=False):\n    resize_x = 1.0\n    resize_y = org_spacing_xyz[2] / target_voxel_mm\n    interpolation = cv2.INTER_NEAREST if is_mask_image else cv2.INTER_LINEAR\n    res = cv2.resize(images_zyx, dsize=None, fx=resize_x, fy=resize_y,\n                     interpolation=interpolation)  # opencv assumes a (y, x, channels) numpy array, so y = z here\n\n    res = res.swapaxes(0, 2)\n    res = res.swapaxes(0, 1)\n\n    resize_x = org_spacing_xyz[0] / target_voxel_mm\n    resize_y = org_spacing_xyz[1] / target_voxel_mm\n    # cv2 can handle at most 512 channels, so split larger volumes in two\n    if res.shape[2] > 512:\n        res = res.swapaxes(0, 2)\n        res1 = res[:256]\n        res2 = res[256:]\n        res1 = res1.swapaxes(0, 2)\n        res2 = res2.swapaxes(0, 2)\n        res1 = cv2.resize(res1, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)\n        res2 = cv2.resize(res2, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)\n        res1 = res1.swapaxes(0, 2)\n        res2 = res2.swapaxes(0, 2)\n        res = numpy.vstack([res1, res2])\n        res = res.swapaxes(0, 2)\n    else:\n        res = cv2.resize(res, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)\n    res = res.swapaxes(0, 2)\n    res = res.swapaxes(2, 1)\n    return res\n"
] | [
[
"numpy.array",
"numpy.flipud",
"numpy.stack",
"scipy.ndimage.binary_fill_holes",
"numpy.abs",
"numpy.vstack",
"numpy.int16"
]
] |
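The `normalize_hu` step above is a linear rescale of Hounsfield units into [0, 1] with clipping. An equivalent NumPy one-liner, with the bounds copied from the source and the sample HU values invented:

```python
import numpy as np

MIN_BOUND, MAX_BOUND = -1000.0, 400.0
hu = np.array([-1200.0, -1000.0, -300.0, 400.0, 700.0])

# Same mapping as normalize_hu: scale into [0, 1], then clip.
normalized = np.clip((hu - MIN_BOUND) / (MAX_BOUND - MIN_BOUND), 0.0, 1.0)
print(normalized)  # [0.  0.  0.5 1.  1. ]
```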
GxvgiuU/networkx | [
"3f1fdcb7693ff152f17623ce549526ec272698b1"
] | [
"examples/graph/plot_erdos_renyi.py"
] | [
"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\"\"\"\n===========\nErdos Renyi\n===========\n\nCreate an G{n,m} random graph with n nodes and m edges\nand report some properties.\n\nThis graph is sometimes called the Erdős-Rényi graph\nbut is different from G{n,p} or binomial_graph which is also\nsometimes called the Erdős-Rényi graph.\n\"\"\"\n# Author: Aric Hagberg ([email protected])\n\n# Copyright (C) 2004-2018 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom networkx import nx\n\nn = 10 # 10 nodes\nm = 20 # 20 edges\n\nG = nx.gnm_random_graph(n, m)\n\n# some properties\nprint(\"node degree clustering\")\nfor v in nx.nodes(G):\n print('%s %d %f' % (v, nx.degree(G, v), nx.clustering(G, v)))\n\n# print the adjacency list to terminal\ntry:\n nx.write_adjlist(G, sys.stdout)\nexcept TypeError: # Python 3.x\n nx.write_adjlist(G, sys.stdout.buffer)\n\nnx.draw(G)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show"
]
] |
mgxd/niworkflows | [
"d28857d0be2a63263e4c29af44e84d18fdc44d2f"
] | [
"niworkflows/tests/test_segmentation.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\" Segmentation tests \"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom shutil import copy\nimport pytest\n\nfrom niworkflows.nipype.interfaces.base import Bunch\nfrom niworkflows.interfaces.segmentation import FASTRPT, ReconAllRPT\nfrom niworkflows.interfaces.masks import (\n BETRPT, BrainExtractionRPT, SimpleShowMaskRPT, ROIsPlot\n)\nfrom .conftest import _run_interface_mock, datadir\n\n\ndef _smoke_test_report(report_interface, artifact_name):\n report_interface.run()\n out_report = report_interface.inputs.out_report\n\n save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False)\n if save_artifacts:\n copy(out_report, os.path.join(save_artifacts, artifact_name))\n assert os.path.isfile(out_report), 'Report \"%s\" does not exist' % out_report\n\n\ndef test_BETRPT(moving):\n \"\"\" the BET report capable test \"\"\"\n bet_rpt = BETRPT(generate_report=True, in_file=moving)\n _smoke_test_report(bet_rpt, 'testBET.svg')\n\n\ndef test_ROIsPlot(oasis_dir):\n \"\"\" the BET report capable test \"\"\"\n import nibabel as nb\n import numpy as np\n\n labels = nb.load(os.path.join(oasis_dir, 'T_template0_glm_4labelsJointFusion.nii.gz'))\n data = labels.get_data()\n out_files = []\n ldata = np.zeros_like(data)\n for i, l in enumerate([1, 3, 4, 2]):\n ldata[data == l] = 1\n out_files.append(os.path.abspath('label%d.nii.gz' % i))\n lfile = nb.Nifti1Image(ldata, labels.affine, labels.header)\n lfile.to_filename(out_files[-1])\n\n roi_rpt = ROIsPlot(\n generate_report=True,\n in_file=os.path.join(oasis_dir, 'T_template0.nii.gz'),\n in_mask=out_files[-1],\n in_rois=out_files[:-1],\n colors=['g', 'y']\n )\n _smoke_test_report(roi_rpt, 'testROIsPlot.svg')\n\n\ndef test_SimpleShowMaskRPT(oasis_dir):\n \"\"\" the BET report capable test \"\"\"\n\n msk_rpt = SimpleShowMaskRPT(\n generate_report=True,\n background_file=os.path.join(oasis_dir, 'T_template0.nii.gz'),\n mask_file=os.path.join(oasis_dir, 'T_template0_BrainCerebellumRegistrationMask.nii.gz')\n )\n _smoke_test_report(msk_rpt, 'testSimpleMask.svg')\n\n\ndef test_BrainExtractionRPT(monkeypatch, oasis_dir, moving, nthreads):\n \"\"\" test antsBrainExtraction with reports\"\"\"\n\n def _agg(objekt, runtime):\n outputs = Bunch(BrainExtractionMask=os.path.join(\n datadir, 'testBrainExtractionRPTBrainExtractionMask.nii.gz')\n )\n return outputs\n\n # Patch the _run_interface method\n monkeypatch.setattr(BrainExtractionRPT, '_run_interface',\n _run_interface_mock)\n monkeypatch.setattr(BrainExtractionRPT, 'aggregate_outputs',\n _agg)\n\n bex_rpt = BrainExtractionRPT(\n generate_report=True,\n dimension=3,\n use_floatingpoint_precision=1,\n anatomical_image=moving,\n brain_template=os.path.join(oasis_dir, 'T_template0.nii.gz'),\n brain_probability_mask=os.path.join(\n oasis_dir, 'T_template0_BrainCerebellumProbabilityMask.nii.gz'),\n extraction_registration_mask=os.path.join(\n oasis_dir, 'T_template0_BrainCerebellumRegistrationMask.nii.gz'),\n out_prefix='testBrainExtractionRPT',\n debug=True, # run faster for testing purposes\n num_threads=nthreads\n )\n _smoke_test_report(bex_rpt, 'testANTSBrainExtraction.svg')\n\n\[email protected](\"segments\", [True, False])\ndef test_FASTRPT(monkeypatch, segments, reference, reference_mask):\n \"\"\" test FAST with the two options for segments \"\"\"\n from niworkflows.nipype.interfaces.fsl.maths import ApplyMask\n\n def _agg(objekt, runtime):\n outputs = Bunch(tissue_class_map=os.path.join(\n datadir, 
'testFASTRPT-tissue_class_map.nii.gz'),\n tissue_class_files=[\n os.path.join(datadir, 'testFASTRPT-tissue_class_files0.nii.gz'),\n os.path.join(datadir, 'testFASTRPT-tissue_class_files1.nii.gz'),\n os.path.join(datadir, 'testFASTRPT-tissue_class_files2.nii.gz')]\n )\n return outputs\n\n # Patch the _run_interface method\n monkeypatch.setattr(FASTRPT, '_run_interface',\n _run_interface_mock)\n monkeypatch.setattr(FASTRPT, 'aggregate_outputs',\n _agg)\n\n brain = ApplyMask(\n in_file=reference, mask_file=reference_mask).run().outputs.out_file\n fast_rpt = FASTRPT(\n in_files=brain,\n generate_report=True,\n no_bias=True,\n probability_maps=True,\n segments=segments,\n out_basename='test'\n )\n _smoke_test_report(\n fast_rpt, 'testFAST_%ssegments.svg' % ('no' * int(not segments)))\n\n\ndef test_ReconAllRPT(monkeypatch):\n # Patch the _run_interface method\n monkeypatch.setattr(ReconAllRPT, '_run_interface',\n _run_interface_mock)\n\n rall_rpt = ReconAllRPT(\n subject_id='fsaverage',\n directive='all',\n subjects_dir=os.getenv('SUBJECTS_DIR'),\n generate_report=True\n )\n\n _smoke_test_report(rall_rpt, 'testReconAll.svg')\n"
] | [
[
"numpy.zeros_like"
]
] |
UnifyID/humandetect-sample-flask | [
"32cfa4850f9be31cefab9beb95e6e1d2e6904008"
] | [
"app/main.py"
] | [
"# Copyright (c) 2020 UnifyID, Inc. All rights reserved.\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\nimport os\nimport requests\nimport json\nfrom flask import Flask, request, jsonify\nimport numpy as np\nimport cv2\nfrom tensorflow.keras.applications import MobileNet\nfrom tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions\nfrom tensorflow.keras.preprocessing import image\n\napp = Flask(__name__)\nmodel = MobileNet(weights='imagenet')\nUPLOAD_FOLDER = './uploads'\nHEADERS = {\n 'Content-Type': 'application/json',\n 'X-API-Key': os.environ['UnifyIDAPIKey'],\n}\n\nif not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n file = request.files['file']\n token = request.form['token']\n\n if not file:\n return \"Error: file not found in request\"\n\n if not token:\n return \"Error: token not found in request\"\n\n print(\"token:\", token)\n\n hd_response = requests.post('https://api.unify.id/v1/humandetect/verify', headers=HEADERS, data=json.dumps({\"token\": token}))\n\n if hd_response.status_code == 400:\n return \"Error: invalid HumanDetect token\"\n\n hd_json = hd_response.json()\n\n if \"valid\" not in hd_json or not hd_json[\"valid\"]:\n return \"Error: HumanDetect verification failed\"\n\n try:\n print(\"valid human:\", hd_response.json()[\"valid\"])\n\n filename = os.path.join(UPLOAD_FOLDER, \"image.png\")\n file.save(filename)\n\n img = cv2.imread(filename)\n img = cv2.resize(img, (224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = decode_predictions(model.predict(x), top=5)[0]\n\n preds_formatted = \", \".join([\n f\"{class_description}: {score*100:.2f}%\"\n for (_, class_description, score) in preds\n ])\n\n print(\"predictions: \", preds_formatted, \"\\n\")\n return preds_formatted\n\n except Exception as e:\n print(e)\n return \"Error: prediction failed, try again later\"\n\n else:\n return \"<h1>HumanDetect Example</h1>\"\n"
] | [
[
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.applications.MobileNet",
"numpy.expand_dims",
"tensorflow.keras.applications.mobilenet.preprocess_input"
]
] |
alexivaner/sklearn-onnx | [
"535a79481a79964287430bb390912c16911cff01"
] | [
"tests/test_sklearn_adaboost_converter.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport unittest\nfrom distutils.version import StrictVersion\nimport onnx\nfrom onnx.defs import onnx_opset_version\nimport onnxruntime\nfrom sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import (\n BooleanTensorType,\n FloatTensorType,\n Int64TensorType,\n)\nfrom skl2onnx.common.data_types import onnx_built_with_ml\nfrom test_utils import (\n dump_data_and_model,\n fit_classification_model,\n fit_regression_model,\n)\n\n\nclass TestSklearnAdaBoostModels(unittest.TestCase):\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_samme_r(self):\n model, X_test = fit_classification_model(AdaBoostClassifier(\n n_estimators=10, algorithm=\"SAMME.R\", random_state=42,\n base_estimator=DecisionTreeClassifier(\n max_depth=2, random_state=42)), 3)\n model_onnx = convert_sklearn(\n model,\n \"AdaBoost classification\",\n [(\"input\", FloatTensorType((None, X_test.shape[1])))],\n target_opset=10\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierSAMMER\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"<= StrictVersion('0.2.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_samme_r_decision_function(self):\n model, X_test = fit_classification_model(AdaBoostClassifier(\n n_estimators=10, algorithm=\"SAMME.R\", random_state=42,\n base_estimator=DecisionTreeClassifier(\n max_depth=2, random_state=42)), 4)\n options = {id(model): {'raw_scores': True}}\n model_onnx = convert_sklearn(\n model,\n \"AdaBoost classification\",\n [(\"input\", FloatTensorType((None, X_test.shape[1])))],\n target_opset=10,\n options=options,\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierSAMMERDecisionFunction\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"<= StrictVersion('0.2.1')\",\n methods=['predict', 'decision_function'],\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_samme_r_logreg(self):\n model, X_test = fit_classification_model(AdaBoostClassifier(\n n_estimators=5, algorithm=\"SAMME.R\",\n base_estimator=LogisticRegression(\n solver='liblinear')), 4)\n model_onnx = convert_sklearn(\n model,\n \"AdaBoost classification\",\n [(\"input\", FloatTensorType((None, X_test.shape[1])))],\n target_opset=10\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierSAMMERLogReg\",\n 
allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"<= StrictVersion('0.2.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_samme(self):\n model, X_test = fit_classification_model(AdaBoostClassifier(\n n_estimators=5, algorithm=\"SAMME\", random_state=42,\n base_estimator=DecisionTreeClassifier(\n max_depth=6, random_state=42)), 2)\n model_onnx = convert_sklearn(\n model,\n \"AdaBoostClSamme\",\n [(\"input\", FloatTensorType((None, X_test.shape[1])))],\n target_opset=10,\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierSAMMEDT\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"< StrictVersion('0.5.0')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_samme_decision_function(self):\n model, X_test = fit_classification_model(AdaBoostClassifier(\n n_estimators=5, algorithm=\"SAMME\", random_state=42,\n base_estimator=DecisionTreeClassifier(\n max_depth=6, random_state=42)), 2)\n options = {id(model): {'raw_scores': True}}\n model_onnx = convert_sklearn(\n model,\n \"AdaBoostClSamme\",\n [(\"input\", FloatTensorType((None, X_test.shape[1])))],\n target_opset=10,\n options=options,\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierSAMMEDTDecisionFunction\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"< StrictVersion('0.5.0')\",\n methods=['predict', 'decision_function_binary'],\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_lr(self):\n model, X_test = fit_classification_model(\n AdaBoostClassifier(learning_rate=0.3, random_state=42), 3,\n is_int=True)\n model_onnx = convert_sklearn(\n model,\n \"AdaBoost classification\",\n [(\"input\", Int64TensorType((None, X_test.shape[1])))],\n target_opset=10\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierLR\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"<= StrictVersion('0.2.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_classifier_bool(self):\n model, X_test = fit_classification_model(\n AdaBoostClassifier(random_state=42), 3,\n is_bool=True)\n model_onnx = convert_sklearn(\n model,\n \"AdaBoost classification\",\n [(\"input\", BooleanTensorType((None, X_test.shape[1])))],\n target_opset=10,\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X_test,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostClassifierBool\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__)\"\n \"<= StrictVersion('0.2.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n 
StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(n_estimators=5))\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n target_opset=10)\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressor-Dec4\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.0') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor_lreg(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(n_estimators=5,\n base_estimator=LinearRegression()))\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n target_opset=10)\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressorLReg-Dec4\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.0') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor_int(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(n_estimators=5), is_int=True)\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", Int64TensorType([None, X.shape[1]]))],\n target_opset=10)\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressorInt\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.0') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor_lr10(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(learning_rate=0.5, random_state=42))\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n target_opset=10)\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressorLR-Dec4\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.0') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n verbose=False\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnxruntime.__version__) <\n StrictVersion(\"0.5.9999\")),\n reason=\"not available\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor_lr11(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(learning_rate=0.5, random_state=42))\n if onnx_opset_version() < 11:\n try:\n convert_sklearn(\n model, \"AdaBoost regression\",\n 
[(\"input\", FloatTensorType([None, X.shape[1]]))],\n target_opset=11)\n except RuntimeError:\n return\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", FloatTensorType([None, X.shape[1]]))],\n target_opset=11)\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressorLR-Dec4\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.9999') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n verbose=False\n )\n\n @unittest.skipIf(not onnx_built_with_ml(),\n reason=\"Requires ONNX-ML extension.\")\n @unittest.skipIf((StrictVersion(onnx.__version__) <\n StrictVersion(\"1.5.0\")),\n reason=\"not available\")\n def test_ada_boost_regressor_bool(self):\n model, X = fit_regression_model(\n AdaBoostRegressor(learning_rate=0.5, random_state=42),\n is_bool=True)\n model_onnx = convert_sklearn(\n model, \"AdaBoost regression\",\n [(\"input\", BooleanTensorType([None, X.shape[1]]))],\n target_opset=10,\n )\n self.assertIsNotNone(model_onnx)\n dump_data_and_model(\n X,\n model,\n model_onnx,\n basename=\"SklearnAdaBoostRegressorBool\",\n allow_failure=\"StrictVersion(\"\n \"onnxruntime.__version__) \"\n \"< StrictVersion('0.5.0') or \"\n \"StrictVersion(onnx.__version__) \"\n \"== StrictVersion('1.4.1')\",\n verbose=False,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"sklearn.ensemble.AdaBoostRegressor",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"sklearn.tree.DecisionTreeClassifier"
]
] |
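A minimal sketch (not part of the test file above) of the convert-then-score pattern these AdaBoost tests exercise; the toy dataset and the input tensor name are illustrative assumptions:

import numpy as np
import onnxruntime
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

# Fit a small model, convert with a dynamic batch dimension, then run it in onnxruntime.
X, y = load_iris(return_X_y=True)
model = AdaBoostClassifier(n_estimators=10, random_state=42).fit(X, y)
onx = convert_sklearn(model, "AdaBoost classification",
                      [("input", FloatTensorType([None, X.shape[1]]))],
                      target_opset=10)
sess = onnxruntime.InferenceSession(onx.SerializeToString())  # recent onnxruntime may also want providers=[...]
labels = sess.run(None, {"input": X.astype(np.float32)})[0]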
afeinstein20/Eureka | [
"7c330086ff7978b81d0f6ebb83a88c0bee01dc50"
] | [
"eureka/S3_data_reduction/background.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nfrom tqdm import tqdm\n\n__all__ = ['BGsubtraction', 'fitbg', 'fitbg2', 'fitbg3']\n\ndef BGsubtraction(data, meta, log, isplots):\n \"\"\"Does background subtraction using inst.fit_bg & background.fitbg\n\n Parameters\n ----------\n data: DataClass\n Data object containing data, uncertainty, and variance arrays in units of MJy/sr or DN/s.\n meta: MetaClass\n The metadata object.\n log: logedit.Logedit\n The open log in which notes from this step can be added.\n isplots: int\n The amount of plots saved; set in ecf.\n\n Returns\n -------\n data: DataClass\n Data object containing background subtracted data.\n \"\"\"\n n_int, bg_y1, bg_y2, subdata, submask = meta.n_int, meta.bg_y1, meta.bg_y2, data.subdata, data.submask\n\n # Load instrument module\n if meta.inst == 'miri':\n from . import miri as inst\n elif meta.inst == 'nircam':\n from . import nircam as inst\n elif meta.inst == 'nirspec':\n from . import nirspec as inst\n elif meta.inst == 'niriss':\n raise ValueError('NIRISS observations are currently unsupported!')\n else:\n raise ValueError('Unknown instrument {}'.format(meta.inst))\n\n # Write background\n def writeBG(arg):\n bg_data, bg_mask, n = arg\n subbg[n] = bg_data\n submask[n] = bg_mask\n return\n\n # Compute background for each integration\n log.writelog(' Performing background subtraction')\n subbg = np.zeros((subdata.shape))\n if meta.ncpu == 1:\n # Only 1 CPU\n for n in tqdm(range(meta.int_start,n_int)):\n # Fit sky background with out-of-spectra data\n writeBG(inst.fit_bg(subdata[n], meta, submask[n], bg_y1, bg_y2, meta.bg_deg, meta.p3thresh, n, isplots))\n else:\n # Multiple CPUs\n pool = mp.Pool(meta.ncpu)\n args_list = []\n for n in range(meta.int_start,n_int):\n args_list.append((subdata[n], meta, submask[n], bg_y1, bg_y2, meta.bg_deg, meta.p3thresh, n, isplots))\n jobs = [pool.apply_async(func=inst.fit_bg, args=(*args,), callback=writeBG) for args in args_list]\n pool.close()\n for job in tqdm(jobs):\n res = job.get()\n\n # 9. Background subtraction\n # Perform background subtraction\n subdata -= subbg\n\n data.subbg, data.submask, data.subdata = subbg, submask, subdata\n\n return data\n\n# STEP 3: Fit sky background with out-of-spectra data\ndef fitbg(dataim, meta, mask, x1, x2, deg=1, threshold=5, isrotate=False, isplots=0):\n '''Fit sky background with out-of-spectra data.\n\n Parameters\n ----------\n dataim: ndarray\n The data array\n meta: MetaClass\n The metadata object.\n mask: ndarray\n A mask array\n x1: ndarray\n x2: ndarray\n deg: int, optional\n Polynomial order for column-by-column background subtraction\n Default is 1.\n threshold: int, optional\n Sigma threshold for outlier rejection during background subtraction.\n Defaullt is 5.\n isrotate: bool, optional\n Default is False.\n isplots: int, optional\n The amount of plots saved; set in ecf. 
Default is 0.\n\n Notes\n ------\n History:\n\n - May 2013\n Removed [::-1] for LDSS3\n\n - Feb 2014\n Modified x1 and x2 to allow for arrays\n '''\n # Assume x is the spatial direction and y is the wavelength direction\n # Otherwise, rotate array\n if isrotate == 1:\n dataim = dataim[::-1].T\n mask = mask[::-1].T\n elif isrotate == 2:\n dataim = dataim.T\n mask = mask.T\n\n #Convert x1 and x2 to array, if need be\n ny, nx = np.shape(dataim)\n if type(x1) == int or type(x1) == np.int64:\n x1 = np.zeros(ny,dtype=int)+x1\n if type(x2) == int or type(x2) == np.int64:\n x2 = np.zeros(ny,dtype=int)+x2\n\n if deg < 0:\n # Calculate median background of entire frame\n # Assumes all x1 and x2 values are the same\n submask = np.concatenate(( mask[:, :x1[0]].T, mask[:,x2[0]+1:nx].T)).T\n subdata = np.concatenate((dataim[:, :x1[0]].T,dataim[:,x2[0]+1:nx].T)).T\n bg = np.zeros((ny,nx)) + np.median(subdata[np.where(submask)])\n elif deg is None:\n # No background subtraction\n bg = np.zeros((ny,nx))\n else:\n degs = np.ones(ny)*deg\n # Initiate background image with zeros\n bg = np.zeros((ny,nx))\n # Fit polynomial to each column\n for j in range(ny):\n nobadpixels = False\n # Create x indices for background sections of frame\n xvals = np.concatenate((range(x1[j]), range(x2[j]+1,nx))).astype(int)\n # If too few good pixels then average\n if (np.sum(mask[j,:x1[j]]) < deg) or (np.sum(mask[j,x2[j]+1:nx]) < deg):\n degs[j] = 0\n while (nobadpixels == False):\n try:\n goodxvals = xvals[np.where(mask[j,xvals])]\n except:\n print(\"****Warning: Background subtraction failed!****\")\n print(j)\n print(xvals)\n print(np.where(mask[j,xvals]))\n return\n dataslice = dataim[j,goodxvals]\n # Check for at least 1 good x value\n if len(goodxvals) == 0:\n nobadpixels = True #exit while loop\n #Use coefficients from previous row\n else:\n # Fit along spatial direction with a polynomial of degree 'deg'\n coeffs = np.polyfit(goodxvals, dataslice, deg=degs[j])\n # Evaluate model at goodxvals\n model = np.polyval(coeffs, goodxvals)\n # Calculate residuals and number of sigma from the model\n residuals = dataslice - model\n # Simple standard deviation (faster but prone to missing scanned background stars)\n #stdres = np.std(residuals)\n # Median Absolute Deviation (slower but more robust)\n #stdres = np.median(np.abs(np.ediff1d(residuals)))\n # Mean Absolute Deviation (good compromise)\n stdres = np.mean(np.abs(np.ediff1d(residuals)))\n if stdres == 0:\n stdres = np.inf\n stdevs = np.abs(residuals) / stdres\n # Find worst data point\n loc = np.argmax(stdevs)\n # Mask data point if > threshold\n if stdevs[loc] > threshold:\n mask[j,goodxvals[loc]] = 0\n else:\n nobadpixels = True #exit while loop\n\n # Evaluate background model at all points, write model to background image\n if len(goodxvals) != 0:\n bg[j] = np.polyval(coeffs, range(nx))\n if isplots >= 6:\n plt.figure(3601)\n plt.clf()\n plt.title(str(j))\n plt.plot(goodxvals, dataslice, 'bo')\n plt.plot(range(nx), bg[j], 'g-')\n plt.savefig(meta.outputdir + 'figs/Fig6_BG_'+str(j)+'.png')\n plt.pause(0.01)\n\n if isrotate == 1:\n bg = (bg.T)[::-1]\n mask = (mask.T)[::-1]\n elif isrotate == 2:\n bg = (bg.T)\n mask = (mask.T)\n\n return bg, mask\n\n# STEP 3: Fit sky background with out-of-spectra data\ndef fitbg2(dataim, meta, mask, bgmask, deg=1, threshold=5, isrotate=False, isplots=0):\n '''Fit sky background with out-of-spectra data.\n\n fitbg2 uses bgmask, a mask for the background region which enables fitting more complex\n background regions than simply above or 
below a given distance from the trace. This will\n help mask the 2nd and 3rd orders of NIRISS.\n\n Parameters\n ----------\n dataim: ndarray\n The data array\n meta: MetaClass\n The metadata object.\n mask: ndarray\n A mask array\n bgmask: ndarray\n A background mask array.\n deg: int, optional\n Polynomial order for column-by-column background subtraction.\n Default is 1.\n threshold: int, optional\n Sigma threshold for outlier rejection during background subtraction.\n Default is 5.\n isrotate: bool, optional\n Default is False.\n isplots: int, optional\n The amount of plots saved; set in ecf. Default is 0.\n\n Notes\n ------\n History:\n\n - September 2016 Kevin Stevenson\n Initial version\n '''\n # Assume x is the spatial direction and y is the wavelength direction\n # Otherwise, rotate array\n if isrotate == 1:\n dataim = dataim[::-1].T\n mask = mask[::-1].T\n bgmask = bgmask[::-1].T\n\n elif isrotate == 2:\n dataim = dataim.T\n mask = mask.T\n bgmask = bgmask.T\n\n # Initiate background image with zeros\n ny, nx = np.shape(dataim)\n bg = np.zeros((ny,nx))\n\n if deg < 0:\n # Calculate median background of entire frame\n bg += np.median(dataim[np.where(bgmask)])\n\n elif deg is None:\n # No background subtraction\n pass\n else:\n degs = np.ones(ny)*deg\n # Fit polynomial to each column\n for j in tqdm(range(ny)):\n nobadpixels = False\n # Create x indices for background sections of frame\n xvals = np.where(bgmask[j] == 1)[0]\n # If too few good pixels on either half of detector then compute average\n if (np.sum(bgmask[j,:int(nx/2)]) < deg) or (np.sum(bgmask[j,int(nx/2):nx]) < deg):\n degs[j] = 0\n while (nobadpixels == False):\n try:\n goodxvals = xvals[np.where(bgmask[j,xvals])]\n except:\n print('column: ', j, 'xvals: ', xvals)\n print(np.where(mask[j,xvals]))\n return\n dataslice = dataim[j,goodxvals]\n # Check for at least 1 good x value\n if len(goodxvals) == 0:\n nobadpixels = True #exit while loop\n #Use coefficients from previous row\n\n else:\n # Fit along spatial direction with a polynomial of degree 'deg'\n coeffs = np.polyfit(goodxvals, dataslice, deg=degs[j])\n # Evaluate model at goodxvals\n model = np.polyval(coeffs, goodxvals)\n\n #model = smooth.smooth(dataslice, window_len=window_len, window=windowtype)\n #model = sps.medfilt(dataslice, window_len)\n if isplots == 6:\n plt.figure(3601)\n plt.clf()\n plt.title(str(j))\n plt.plot(goodxvals, dataslice, 'bo')\n plt.plot(goodxvals, model, 'g-')\n plt.savefig(meta.outputdir + 'figs/Fig6_BG_'+str(j)+'.png')\n plt.pause(0.01)\n\n # Calculate residuals\n residuals = dataslice - model\n\n # Find worst data point\n loc = np.argmax(np.abs(residuals))\n # Calculate standard deviation of points excluding worst point\n ind = np.arange(0,len(residuals),1)\n ind = np.delete(ind, loc)\n stdres = np.std(residuals[ind])\n \n if stdres == 0:\n stdres = np.inf\n # Calculate number of sigma from the model\n stdevs = np.abs(residuals) / stdres\n #print(stdevs)\n\n # Mask data point if > threshold\n if stdevs[loc] > threshold:\n bgmask[j,goodxvals[loc]] = 0\n else:\n nobadpixels = True #exit while loop\n\n\n if isplots == 6:\n plt.figure(3601)\n plt.clf()\n plt.title(str(j))\n plt.plot(goodxvals, dataslice, 'bo')\n plt.plot(goodxvals, model, 'g-')\n plt.pause(0.01)\n plt.show()\n\n # Evaluate background model at all points, write model to background image\n if len(goodxvals) != 0:\n bg[j] = np.polyval(coeffs, range(nx))\n #bg[j] = np.interp(range(nx), goodxvals, model)\n\n if isrotate == 1:\n bg = (bg.T)[::-1]\n mask = (mask.T)[::-1]\n 
bgmask = (bgmask.T)[::-1]\n elif isrotate == 2:\n bg = (bg.T)\n mask = (mask.T)\n bgmask = (bgmask.T)\n\n return bg, bgmask#, mask #,variance\n\n\ndef fitbg3(data, isplots=0):\n \"\"\"\n Fit sky background with out-of-spectra data. Hopefully this is a faster\n routine than fitbg2. (optimized to fit across the x-direction)\n\n Parameters\n ----------\n data : np.ndarray\n Data image to fit the background to.\n isplots : int, optional\n The amount of plots saved; set in ecf. Default is 0.\n\n Returns\n -------\n bg : np.ndarray\n Background model.\n \"\"\"\n"
] | [
[
"numpy.concatenate",
"numpy.delete",
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.polyval",
"numpy.where",
"numpy.std",
"numpy.argmax",
"numpy.polyfit",
"matplotlib.pyplot.pause",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"numpy.ediff1d"
]
] |
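A compact numpy sketch (with made-up numbers) of the column-by-column fit that fitbg performs: fit a polynomial to the out-of-trace pixels of one row, then evaluate it across the full row as the background model:

import numpy as np

row = 0.02 * np.arange(100) + 1.0   # smooth synthetic background
row[40:60] += 50.0                  # a bright spectral trace between x1 and x2
x1, x2 = 40, 59

xvals = np.concatenate((np.arange(x1), np.arange(x2 + 1, 100)))  # background columns only
coeffs = np.polyfit(xvals, row[xvals], deg=1)
bg_row = np.polyval(coeffs, np.arange(100))  # the model written into bg[j] above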
nesilin/evolution_TALL_adults | [
"f36d6ebaeb43376096c14fc9ca20116bc2febae6"
] | [
"ext_runs/run_deconstructSig/assign_signature_to_mutation.py"
] | [
"import pandas as pd\nimport os\nimport click\nimport subprocess\n\[email protected]()\[email protected]('--in_path',\n '-i',\n type=click.Path(exists=True),\n required = True,\n help=\"Input must be a string written in command line specifying the absolute path to the mutation list \"\n \"of the cohort\")\[email protected]('--output_path',\n '-o',\n type=click.Path(exists=True),\n required=True,\n help=\"Output must be a string written in command line specifying the absolute path for the output \"\n \"of the cohort\")\[email protected]('--sign',\n '-s',\n type=click.Path(exists=True),\n required=True,\n help=\"Path to the signature's matrices (cosmic/pcawg signatures)\")\[email protected]('--type',\n '-t',\n type=str,\n required=True,\n help=\"Type analysis subset (subset of signatures for BALL/TALL or all signatures of each leukemia)\")\n\n\ndef cli(in_path, output_path, sign, type):\n \"\"\"\n 'Given a MAF of mutations it runs deconstructSigs R package. Then with the output results of '\n 'deconstructSigs R package calculates the probability for each mutation (type) generated by each '\n 'signature using W and H matrices.'\n\n \"\"\"\n\n # READ DATASET AND FILTER OUT INDELS\n\n muts = pd.read_table(in_path, sep='\\t')\n\n # CREATE TEMPORAL FILE OF MUTATIONS WITHOUT INDELS\n\n os.mkdir(os.path.join(output_path, \"tmp\"))\n\n if 'SAMPLE' in muts.columns:\n muts[['#CHROM', 'POS', 'REF', 'ALT', 'SAMPLE']].to_csv(os.path.join(output_path, \"tmp\",\n \"mutations_snv.csv\"), sep='\\t',index=False)\n else:\n muts[['#CHROM', 'POS', 'REF', 'ALT', 'PATIENT']].to_csv(os.path.join(output_path, \"tmp\",\n \"mutations_snv.csv\"), sep='\\t', index=False)\n\n base_path = os.path.dirname(os.path.abspath(__file__))\n decons_script = os.path.join(base_path, 'deconstructSigs.r')\n\n # RUN DECONSTRUCTSIGN IN R\n subprocess.run(\n 'cd ' + output_path + ' && source activate deconstructR && Rscript '+ decons_script +' '+ os.path.join(\n output_path, \"tmp\", \"mutations_snv.csv\")+\" \"+sign+\" \"+type,\n shell=True, executable='/bin/bash')\n\n\n # REMOVE TEMPORAL FILE\n os.remove(os.path.join(output_path, \"tmp\",\n \"mutations_snv.csv\"))\n os.rmdir(os.path.join(output_path, \"tmp\"))\n\n # READ SIGNATURE FILE (WEIGHT MATRIX)\n W = pd.read_csv(sign, header=0, sep=\"\\t\")\n\n # TRANSPOSE\n W = W.T\n\n # READ EXPOSURE FILE\n H = pd.read_csv(os.path.join(output_path, \"signatures_weight.csv\"), header=0, sep=\"\\t\")\n\n # COMPUTE PROBABILITIES\n # go over each sample in H matrix and compute the probability for each mutation type in a tri-nuc context\n\n frames = [] # to collect results sample wise\n flag = 0\n for idx, row in H.iterrows(): # go over each sample\n sample = row['sample_id']\n sig_dic = {}\n allsigs = []\n # get the exposure (i.e total number of mutations belong to each signature) value for the particular sample from H matrix\n for col in H.columns:\n if col not in ['sample_id', 'SSE', 'mutation_count', 'unknown']:\n sig_dic[col] = row[col] * row['mutation_count'] # save the exposuse value in a dictionary per signature name\n allsigs.append(col) # save the signature names\n # multiple the exposure (from H) with the W matrix\n a = W.copy() # take a copy of the W matrix (which is the extracted signatures - not sample specific)\n\n for sig in allsigs:\n a[sig] *= sig_dic[\n sig] # mutiply the signature columns with the corresponding signature exposure in that particular sample\n\n # compute the row sum for normalization (i.e sum of values across signature for each mutation/context type)\n 
a['row_sum'] = a[allsigs].sum(axis=1)\n\n # normalize the row values with the row sum to derive\n # the probabilities for different signatures for each mutation type\n new = a[allsigs].div(a['row_sum'], axis=0)[allsigs]\n\n # add info columns\n new['Mutation_type'] = new.index\n new['Sample'] = sample\n\n # sort the columns\n columns = ['Sample', 'Mutation_type'] + allsigs\n\n new = new[columns]\n\n # save the results for each sample in a dataframe\n if flag == 0:\n frames = [new]\n flag += 1\n else:\n frames.append(new)\n\n results_new = pd.concat(frames)\n\n # WRITE RESULTS\n\n # this output file will contain the probabilities for each mutation type (under particular tri-nucleotides) to be\n # generated by different signatures per sample.\n results_new.to_csv(os.path.join(output_path, \"mutation_sign_prob.tsv\"), sep=\"\\t\", header=True, index=False)\n\n\nif __name__ == '__main__':\n cli()"
] | [
[
"pandas.concat",
"pandas.read_table",
"pandas.read_csv"
]
] |
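A toy pandas sketch of the probability step in assign_signature_to_mutation.py: scale each signature column of W by that sample's exposure, then row-normalize (the signature names and numbers here are made up):

import pandas as pd

W = pd.DataFrame({'SBS1': [0.7, 0.2, 0.1], 'SBS5': [0.1, 0.3, 0.6]},
                 index=['A[C>A]A', 'A[C>T]G', 'T[T>C]T'])
exposures = {'SBS1': 120.0, 'SBS5': 80.0}  # weight * mutation_count, as in sig_dic above

a = W.copy()
for sig, value in exposures.items():
    a[sig] *= value
a['row_sum'] = a[list(exposures)].sum(axis=1)
prob = a[list(exposures)].div(a['row_sum'], axis=0)  # per-mutation-type signature probabilities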
lhz1029/DomainBed | [
"c3a3f6363974f5c9b16b82c4159eb54223699f2d"
] | [
"domainbed/networks.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models\n\nfrom domainbed.lib import misc\nfrom domainbed.lib import wide_resnet\n\n\nclass Identity(nn.Module):\n \"\"\"An identity layer\"\"\"\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\nclass SqueezeLastTwo(nn.Module):\n \"\"\"A module which squeezes the last two dimensions, ordinary squeeze can be a problem for batch size 1\"\"\"\n def __init__(self):\n super(SqueezeLastTwo, self).__init__()\n\n def forward(self, x):\n return x.view(x.shape[0], x.shape[1])\n\n\nclass MLP(nn.Module):\n \"\"\"Just an MLP\"\"\"\n def __init__(self, n_inputs, n_outputs, hparams):\n super(MLP, self).__init__()\n self.input = nn.Linear(n_inputs, hparams['mlp_width'])\n self.dropout = nn.Dropout(hparams['mlp_dropout'])\n self.hiddens = nn.ModuleList([\n nn.Linear(hparams['mlp_width'],hparams['mlp_width'])\n for _ in range(hparams['mlp_depth']-2)])\n self.output = nn.Linear(hparams['mlp_width'], n_outputs)\n self.n_outputs = n_outputs\n\n def forward(self, x):\n x = self.input(x)\n x = self.dropout(x)\n x = F.relu(x)\n for hidden in self.hiddens:\n x = hidden(x)\n x = self.dropout(x)\n x = F.relu(x)\n x = self.output(x)\n return x\n\nclass ResNet(torch.nn.Module):\n \"\"\"ResNet with the softmax chopped off and the batchnorm frozen\"\"\"\n def __init__(self, input_shape, hparams):\n super(ResNet, self).__init__()\n if hparams['resnet18']:\n self.network = torchvision.models.resnet18(pretrained=hparams['pretrained'])\n self.n_outputs = 512\n else:\n self.network = torchvision.models.resnet50(pretrained=hparams['pretrained'])\n self.n_outputs = 2048\n\n # adapt number of channels\n nc = input_shape[0]\n if nc != 3:\n tmp = self.network.conv1.weight.data.clone()\n\n self.network.conv1 = nn.Conv2d(\n nc, 64, kernel_size=(7, 7),\n stride=(2, 2), padding=(3, 3), bias=False)\n\n for i in range(nc):\n self.network.conv1.weight.data[:, i, :, :] = tmp[:, i % 3, :, :]\n\n # save memory\n del self.network.fc\n self.network.fc = Identity()\n\n self.freeze_bn()\n self.hparams = hparams\n self.dropout = nn.Dropout(hparams['resnet_dropout'])\n\n def forward(self, x):\n \"\"\"Encode x into a feature vector of size n_outputs.\"\"\"\n return self.dropout(self.network(x))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n \"\"\"\n super().train(mode)\n self.freeze_bn()\n\n def freeze_bn(self):\n for m in self.network.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\nclass MNIST_CNN(nn.Module):\n \"\"\"\n Hand-tuned architecture for MNIST.\n Weirdness I've noticed so far with this architecture:\n - adding a linear layer after the mean-pool in features hurts\n RotatedMNIST-100 generalization severely.\n \"\"\"\n n_outputs = 128\n\n def __init__(self, input_shape):\n super(MNIST_CNN, self).__init__()\n self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1)\n self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)\n self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1)\n self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1)\n\n self.bn0 = nn.GroupNorm(8, 64)\n self.bn1 = nn.GroupNorm(8, 128)\n self.bn2 = nn.GroupNorm(8, 128)\n self.bn3 = nn.GroupNorm(8, 128)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.squeezeLastTwo = SqueezeLastTwo()\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.bn0(x)\n\n x = self.conv2(x)\n x = F.relu(x)\n x 
= self.bn1(x)\n\n x = self.conv3(x)\n x = F.relu(x)\n x = self.bn2(x)\n\n x = self.conv4(x)\n x = F.relu(x)\n x = self.bn3(x)\n\n x = self.avgpool(x)\n x = self.squeezeLastTwo(x)\n return x\n\nclass ContextNet(nn.Module):\n def __init__(self, input_shape):\n super(ContextNet, self).__init__()\n\n # Keep same dimensions\n padding = (5 - 1) // 2\n self.context_net = nn.Sequential(\n nn.Conv2d(input_shape[0], 64, 5, padding=padding),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 64, 5, padding=padding),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 1, 5, padding=padding),\n )\n\n def forward(self, x):\n return self.context_net(x)\n\n\ndef Featurizer(input_shape, hparams):\n \"\"\"Auto-select an appropriate featurizer for the given input shape.\"\"\"\n if len(input_shape) == 1:\n return MLP(input_shape[0], 128, hparams)\n elif input_shape[1:3] == (28, 28):\n return MNIST_CNN(input_shape)\n elif input_shape[1:3] == (32, 32):\n return wide_resnet.Wide_ResNet(input_shape, 16, 2, 0.)\n elif input_shape[1:3] == (512, 512):\n return ResNet(input_shape, hparams)\n else:\n raise NotImplementedError\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
]
] |
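A minimal sketch of the fc-chopping trick used in ResNet.__init__ above; torch.nn.Identity stands in for the file's own Identity module:

import torch
import torch.nn as nn
import torchvision.models

net = torchvision.models.resnet18(pretrained=False)
net.fc = nn.Identity()                        # drop the classifier, keep the 512-d features
features = net(torch.randn(2, 3, 224, 224))   # -> shape (2, 512)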
firestonelib/PASSL | [
"f08475c7230282ba5185bf1d2bc3ee39f14dfdee"
] | [
"passl/datasets/preprocess/mixup.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# code was heavily based on\n# https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/mixup.py\n# Copyright (c) 2020 Ross Wightman\n\nimport numpy as np\nimport paddle\n\n\ndef one_hot(x, num_classes, on_value=1., off_value=0.):\n matrix = np.full((x.shape[0], num_classes), off_value)\n for ith in range(len(x)):\n matrix[ith][x[ith].numpy()] = on_value\n return paddle.to_tensor(matrix)\n\n\ndef mixup_target(target, num_classes, lam=1., smoothing=0.0):\n off_value = smoothing / num_classes\n on_value = 1. - smoothing + off_value\n y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value)\n y2 = one_hot(target.flip(0),\n num_classes,\n on_value=on_value,\n off_value=off_value)\n return y1 * lam + y2 * (1. - lam)\n\n\ndef rand_bbox(img_shape, lam, margin=0., count=None):\n \"\"\" Standard CutMix bounding-box\n Generates a random square bbox based on lambda value. This impl includes\n support for enforcing a border margin as percent of bbox dimensions.\n\n Args:\n img_shape (tuple): Image shape as tuple\n lam (float): Cutmix lambda value\n margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)\n count (int): Number of bbox to generate\n \"\"\"\n ratio = np.sqrt(1 - lam)\n img_h, img_w = img_shape[-2:]\n cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)\n margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)\n cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)\n cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)\n yl = np.clip(cy - cut_h // 2, 0, img_h)\n yh = np.clip(cy + cut_h // 2, 0, img_h)\n xl = np.clip(cx - cut_w // 2, 0, img_w)\n xh = np.clip(cx + cut_w // 2, 0, img_w)\n return yl, yh, xl, xh\n\n\ndef rand_bbox_minmax(img_shape, minmax, count=None):\n \"\"\" Min-Max CutMix bounding-box\n Inspired by Darknet cutmix impl, generates a random rectangular bbox\n based on min/max percent values applied to each dimension of the input image.\n\n Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.\n\n Args:\n img_shape (tuple): Image shape as tuple\n minmax (tuple or list): Min and max bbox ratios (as percent of image size)\n count (int): Number of bbox to generate\n \"\"\"\n assert len(minmax) == 2\n img_h, img_w = img_shape[-2:]\n cut_h = np.random.randint(int(img_h * minmax[0]),\n int(img_h * minmax[1]),\n size=count)\n cut_w = np.random.randint(int(img_w * minmax[0]),\n int(img_w * minmax[1]),\n size=count)\n yl = np.random.randint(0, img_h - cut_h, size=count)\n xl = np.random.randint(0, img_w - cut_w, size=count)\n yu = yl + cut_h\n xu = xl + cut_w\n return yl, yu, xl, xu\n\n\ndef cutmix_bbox_and_lam(img_shape,\n lam,\n ratio_minmax=None,\n correct_lam=True,\n count=None):\n \"\"\" Generate bbox and apply lambda correction.\n \"\"\"\n if ratio_minmax is not None:\n yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, 
count=count)\n else:\n yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)\n if correct_lam or ratio_minmax is not None:\n bbox_area = (yu - yl) * (xu - xl)\n lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])\n return (yl, yu, xl, xu), lam\n\n\nclass Mixup:\n \"\"\"\n Mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)\n Cutmix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)\n\tMixup/Cutmix that applies different params to each element or whole batch\n Code Reference:\n CutMix: https://github.com/clovaai/CutMix-PyTorch\n Hacked together by / Copyright 2020 Ross Wightman\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n def __init__(self,\n mixup_alpha=1.,\n cutmix_alpha=0.,\n cutmix_minmax=None,\n prob=1.0,\n switch_prob=0.5,\n mode='batch',\n correct_lam=True,\n label_smoothing=0.1,\n num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended to be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np.random.beta(self.cutmix_alpha,\n self.cutmix_alpha,\n size=batch_size),\n np.random.beta(self.mixup_alpha,\n self.mixup_alpha,\n size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha,\n self.mixup_alpha,\n size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=bool)\n lam_mix = np.random.beta(self.cutmix_alpha,\n self.cutmix_alpha,\n size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(\n np.random.rand(batch_size) < self.mix_prob,\n lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np.random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand() < self.switch_prob\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone(\n ) # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return x, paddle.to_tensor(lam_batch, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone(\n ) # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return x, paddle.to_tensor(lam_batch, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return x, 1.\n if use_cutmix:\n (yl, yh, xl,\n xh), lam = cutmix_bbox_and_lam(x.shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n yl, yh, xl, xh = int(yl), int(yh), int(xl), int(xh)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0) * (1. 
- lam)\n x = x * lam\n x += x_flipped\n return x, lam\n\n def __call__(self, x, target):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n x, lam = self._mix_elem(x)\n elif self.mode == 'pair':\n x, lam = self._mix_pair(x)\n else:\n x, lam = self._mix_batch(x)\n target = mixup_target(target, self.num_classes, lam,\n self.label_smoothing)\n return x, target\n\n\nclass FastCollateMixup(Mixup):\n \"\"\" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch\n\n A Mixup impl that's performed while collating the batches.\n \"\"\"\n def _mix_elem_collate(self, output, batch, half=False):\n batch_size = len(batch)\n num_elem = batch_size // 2 if half else batch_size\n assert len(output) == num_elem\n lam_batch, use_cutmix = self._params_per_elem(num_elem)\n for i in range(num_elem):\n j = batch_size - i - 1\n lam = lam_batch[i]\n mixed = batch[i][0]\n if lam != 1.:\n if use_cutmix[i]:\n if not half:\n mixed = mixed.copy()\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n output.shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(\n np.float32) * (1 - lam)\n np.rint(mixed, out=mixed)\n output[i] += paddle.to_tensor(mixed.astype(np.uint8))\n if half:\n lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))\n return paddle.to_tensor(lam_batch).unsqueeze(1)\n\n def _mix_pair_collate(self, output, batch):\n batch_size = len(batch)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n mixed_i = batch[i][0]\n mixed_j = batch[j][0]\n assert 0 <= lam <= 1.0\n if lam < 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n output.shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n patch_i = mixed_i[:, yl:yh, xl:xh].copy()\n mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]\n mixed_j[:, yl:yh, xl:xh] = patch_i\n lam_batch[i] = lam\n else:\n mixed_temp = mixed_i.astype(\n np.float32) * lam + mixed_j.astype(\n np.float32) * (1 - lam)\n mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(\n np.float32) * (1 - lam)\n mixed_i = mixed_temp\n np.rint(mixed_j, out=mixed_j)\n np.rint(mixed_i, out=mixed_i)\n output[i] += paddle.to_tensor(mixed_i.astype(np.uint8))\n output[j] += paddle.to_tensor(mixed_j.astype(np.uint8))\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return paddle.to_tensor(lam_batch).unsqueeze(1)\n\n def _mix_batch_collate(self, output, batch):\n batch_size = len(batch)\n lam, use_cutmix = self._params_per_batch()\n if use_cutmix:\n (yl, yh, xl,\n xh), lam = cutmix_bbox_and_lam(output.shape,\n lam,\n ratio_minmax=self.cutmix_minmax,\n correct_lam=self.correct_lam)\n for i in range(batch_size):\n j = batch_size - i - 1\n mixed = batch[i][0]\n if lam != 1.:\n if use_cutmix:\n mixed = mixed.copy(\n ) # don't want to modify the original while iterating\n mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]\n else:\n mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(\n np.float32) * (1 - lam)\n np.rint(mixed, out=mixed)\n output[i] += paddle.to_tensor(mixed.astype(np.uint8))\n return lam\n\n def __call__(self, batch, _=None):\n batch_size = len(batch)\n assert batch_size % 2 == 0, 'Batch size should be even when using this'\n half = 'half' in self.mode\n if half:\n 
batch_size //= 2\n output = paddle.zeros((batch_size, *batch[0][0].shape), dtype='uint8')\n if self.mode == 'elem' or self.mode == 'half':\n lam = self._mix_elem_collate(output, batch, half=half)\n elif self.mode == 'pair':\n lam = self._mix_pair_collate(output, batch)\n else:\n lam = self._mix_batch_collate(output, batch)\n target = paddle.to_tensor([b[1] for b in batch], dtype='int64')\n target = mixup_target(target,\n self.num_classes,\n lam,\n self.label_smoothing)\n target = target[:batch_size]\n return output, target\n"
] | [
[
"numpy.concatenate",
"numpy.full",
"numpy.random.rand",
"numpy.zeros",
"numpy.rint",
"numpy.ones",
"numpy.random.beta",
"numpy.random.randint",
"numpy.sqrt",
"numpy.clip"
]
] |
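A small numpy sketch (toy labels, no paddle dependency) of what mixup_target computes: smoothed one-hot targets blended with their batch flip:

import numpy as np

def one_hot_np(y, num_classes, on_value, off_value):
    # dense matrix of off_value with on_value at each label position
    m = np.full((y.size, num_classes), off_value, dtype=np.float32)
    m[np.arange(y.size), y] = on_value
    return m

y = np.array([0, 2, 1])
lam, smoothing, num_classes = 0.7, 0.1, 3
off_value = smoothing / num_classes
on_value = 1.0 - smoothing + off_value
mixed = lam * one_hot_np(y, num_classes, on_value, off_value) \
    + (1.0 - lam) * one_hot_np(y[::-1], num_classes, on_value, off_value)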
dcs4cop/xcube-sh | [
"74f7eaab3e43abf2896f04db768131107383563f"
] | [
"test/test_cube.py"
] | [
"# The MIT License (MIT)\n# Copyright (c) 2020 by the xcube development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport unittest\n\nimport numpy as np\nimport xarray as xr\n\nfrom test.test_sentinelhub import HAS_SH_CREDENTIALS\nfrom test.test_sentinelhub import REQUIRE_SH_CREDENTIALS\nfrom xcube_sh.config import CubeConfig\nfrom xcube_sh.cube import open_cube\nfrom xcube_sh.sentinelhub import SentinelHub\n\ncube_config = CubeConfig(dataset_name='S2L1C',\n band_names=['B04'],\n bbox=(10.00, 54.27, 11.00, 54.60),\n spatial_res=0.00018,\n time_range=('2018-05-14', '2018-07-31'),\n time_tolerance='30M')\n\ncube_config_t1_none = CubeConfig(dataset_name='S2L1C',\n band_names=['B04'],\n bbox=(10.00, 54.27, 11.00, 54.60),\n spatial_res=0.00018,\n time_range=('2021-01-01', None),\n time_period='1D')\n\ncube_config_t_none = CubeConfig(dataset_name='S2L1C',\n band_names=['B04'],\n bbox=(10.00, 54.27, 11.00, 54.60),\n spatial_res=0.00018,\n time_range=(None, '2021-02-18'),\n time_tolerance='30M')\n\ncube_config_with_crs = CubeConfig(dataset_name='S2L1C',\n band_names=['B01'],\n tile_size=(512, 512),\n bbox=(-1953275.571528, 1648364.470634, -1936149.179188, 1664301.688856),\n crs=\"http://www.opengis.net/def/crs/EPSG/0/3857\",\n spatial_res=10, # in meters\n time_range=('2018-05-14', '2020-07-31'),\n time_tolerance='30M')\n\ncube_config_LOTL2 = CubeConfig(dataset_name='LOTL2',\n band_names=['B02', 'B03'],\n bbox=(-17.554176, 14.640112, -17.387367, 14.792487),\n spatial_res=0.000089,\n time_range=('2018-05-14', '2020-07-31'),\n time_tolerance='30M')\n\ncube_config_S2L2A = CubeConfig(dataset_name='S2L2A',\n band_names=['B03', 'B08', 'CLM'],\n bbox=(2894267.8988124575, 9262943.968658403, 2899443.8488556934, 9268505.554239485),\n crs=\"http://www.opengis.net/def/crs/EPSG/0/3857\",\n spatial_res=10,\n time_range=('2020-06-01', '2020-06-30'),\n )\n\ncube_config_S2L2A_1D = CubeConfig(dataset_name='S2L2A',\n band_names=['B03', 'B08', 'CLM'],\n bbox=(2894267.8988124575, 9262943.968658403, 2899443.8488556934, 9268505.554239485),\n crs=\"http://www.opengis.net/def/crs/EPSG/0/3857\",\n spatial_res=10,\n time_range=('2020-06-01', '2020-06-30'),\n time_period='1D'\n )\n\ncube_config_S2L2A_WGS84 = CubeConfig(dataset_name='S2L2A',\n band_names=['B03', 'B08', 'CLM'],\n bbox=(25.99965089839723, 63.65600798545179, 26.046183630114623, 63.67816348259773),\n spatial_res=0.0001,\n time_range=('2020-06-01', '2020-06-30'),\n )\n\ncube_config_S2L2A_WGS84_1D = 
CubeConfig(dataset_name='S2L2A',\n band_names=['B03', 'B08', 'CLM'],\n bbox=(\n 25.99965089839723, 63.65600798545179, 26.046183630114623, 63.67816348259773),\n spatial_res=0.0001,\n time_range=('2020-06-01', '2020-06-30'),\n time_period='1D'\n )\n\n\n@unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\nclass CubeTest(unittest.TestCase):\n def test_open_cube(self):\n cube = open_cube(cube_config=cube_config)\n self.assertIsInstance(cube, xr.Dataset)\n\n def test_time_max_none(self):\n cube = open_cube(cube_config=cube_config_t1_none)\n self.assertIsInstance(cube, xr.Dataset)\n self.assertEqual(np.datetime64('today', 'D'), np.datetime64(cube.time.values[-1], 'D')) # self.assertEqual()\n\n def test_time_none(self):\n cube = open_cube(cube_config=cube_config_t_none)\n self.assertIsInstance(cube, xr.Dataset)\n self.assertEqual(np.datetime64('2021-02-16'), np.datetime64(cube.time.values[-1], 'D')) # self.assertEqual()\n self.assertEqual(np.datetime64('2015-06-27'), np.datetime64(cube.time.values[0], 'D')) # self.assertEqual()\n\n\n@unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\nclass CubeWithCredentialsTest(unittest.TestCase):\n\n def test_open_cube_with_illegal_kwargs(self):\n with self.assertRaises(ValueError) as cm:\n open_cube(cube_config=cube_config,\n sentinel_hub=SentinelHub(),\n api_url=\"https://creodias.sentinel-hub.com/api/v1/catalog/collections\")\n self.assertEqual('unexpected keyword-arguments: api_url', f'{cm.exception}')\n\n @unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\n def test_open_cube_with_other_crs(self):\n cube = open_cube(cube_config_with_crs)\n self.assertIsInstance(cube, xr.Dataset)\n self.assertEqual({'time': 160, 'y': 2048, 'x': 2048, 'bnds': 2}, cube.dims)\n self.assertEqual({'x', 'y', 'time', 'time_bnds'}, set(cube.coords))\n self.assertEqual({'B01', 'crs'}, set(cube.data_vars))\n\n @unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\n def test_open_cube_LOTL2(self):\n cube = open_cube(cube_config_LOTL2, api_url=\"https://services-uswest2.sentinel-hub.com\")\n self.assertIsInstance(cube, xr.Dataset)\n self.assertEqual({'time': 100, 'lat': 1912, 'lon': 2094, 'bnds': 2}, cube.dims)\n self.assertEqual({'lat', 'lon', 'time', 'time_bnds'}, set(cube.coords))\n self.assertEqual({'B02', 'B03'}, set(cube.data_vars))\n\n # used to debug xcube-sh issue 60\n @unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\n def test_open_cube_S2L2A_vs_S2L2A_WGS84(self):\n cube_wgs84 = open_cube(cube_config_S2L2A_WGS84)\n cube = open_cube(cube_config_S2L2A)\n self.assertTrue(cube.time.equals(cube_wgs84.time))\n self.assertIsInstance(cube, xr.Dataset)\n cube = cube.dropna(dim=\"time\")\n cube_wgs84 = cube_wgs84.dropna(dim=\"time\")\n self.assertTrue(cube.time.equals(cube_wgs84.time))\n self.assertEqual(12, len(cube.time))\n self.assertEqual({'lat', 'lon', 'time', 'time_bnds'}, set(cube_wgs84.coords))\n self.assertEqual({'x', 'y', 'time', 'time_bnds'}, set(cube.coords))\n self.assertEqual({'B03', 'B08', 'CLM', 'crs'}, set(cube.data_vars))\n\n # used to debug xcube-sh issue 60\n @unittest.skipUnless(HAS_SH_CREDENTIALS, REQUIRE_SH_CREDENTIALS)\n def test_open_cube_S2L2A_1D(self):\n cube = open_cube(cube_config_S2L2A_1D)\n cube_wgs84 = open_cube(cube_config_S2L2A_WGS84_1D)\n self.assertIsInstance(cube, xr.Dataset)\n self.assertIsInstance(cube_wgs84, xr.Dataset)\n cube = cube.dropna(dim=\"time\")\n cube_wgs84 = cube_wgs84.dropna(dim=\"time\")\n self.assertTrue(cube.time.equals(cube_wgs84.time))\n self.assertEqual({'lat', 'lon', 
'time', 'time_bnds'}, set(cube_wgs84.coords))\n self.assertEqual({'x', 'y', 'time', 'time_bnds'}, set(cube.coords))\n self.assertEqual({'B03', 'B08', 'CLM', 'crs'}, set(cube.data_vars))\n"
] | [
[
"numpy.datetime64"
]
] |
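The usage pattern these tests cover, lifted straight from the configs above; running it requires valid Sentinel Hub credentials in the environment:

from xcube_sh.config import CubeConfig
from xcube_sh.cube import open_cube

config = CubeConfig(dataset_name='S2L1C',
                    band_names=['B04'],
                    bbox=(10.00, 54.27, 11.00, 54.60),
                    spatial_res=0.00018,
                    time_range=('2018-05-14', '2018-07-31'),
                    time_tolerance='30M')
cube = open_cube(cube_config=config)  # an xarray.Dataset, as the tests assert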
The-Learning-And-Vision-Atelier-LAVA/PoSFeat | [
"e8a42c05158384113e1a0eafecf84b516a88c1f1"
] | [
"losses/kploss.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F \nfrom .preprocess_utils import *\nfrom torch.distributions import Categorical, Bernoulli\n\nclass DiskLoss(nn.Module):\n def __init__(self, configs, device=None):\n super(DiskLoss, self).__init__()\n self.__lossname__ = 'DiskLoss'\n self.config = configs\n self.unfold_size = self.config['grid_size']\n self.t_base = self.config['temperature_base']\n self.t_max = self.config['temperature_max']\n self.reward = getattr(self, self.config['epipolar_reward'])\n self.good_reward = self.config['good_reward']\n self.bad_reward = self.config['bad_reward']\n self.kp_penalty = self.config['kp_penalty']\n\n def point_distribution(self, logits):\n proposal_dist = Categorical(logits=logits) # bx1x(h//g)x(w//g)x(g*g)\n proposals = proposal_dist.sample() # bx1x(h//g)x(w//g)\n proposal_logp = proposal_dist.log_prob(proposals) # bx1x(h//g)x(w//g)\n\n # accept_logits = select_on_last(logits, proposals).squeeze(-1)\n accept_logits = torch.gather(logits, dim=-1, index=proposals[..., None]).squeeze(-1) # bx1x(h//g)x(w//g)\n\n accept_dist = Bernoulli(logits=accept_logits)\n accept_samples = accept_dist.sample() # bx1x(h//g)x(w//g)\n accept_logp = accept_dist.log_prob(accept_samples) # for accepted points, equals to sigmoid() then log(); for denied, (1-sigmoid).log\n accept_mask = accept_samples == 1.\n\n logp = proposal_logp + accept_logp\n\n return proposals, accept_mask, logp\n\n def point_sample(self, kp_map):\n kpmap_unfold = unfold(kp_map, self.unfold_size)\n proposals, accept_mask, logp = self.point_distribution(kpmap_unfold)\n\n b, _, h, w = kp_map.shape\n grids_org = gen_grid(h_min=0, h_max=h-1, w_min=0, w_max=w-1, len_h=h, len_w=w)\n grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)\n grids_org = grids_org.permute(0,3,1,2) # bx2xhxw\n grids_unfold = unfold(grids_org, self.unfold_size) # bx2x(h//g)x(w//g)x(g*g)\n\n kps = grids_unfold.gather(dim=4, index=proposals.unsqueeze(-1).repeat(1,2,1,1,1))\n return kps.squeeze(4).permute(0,2,3,1), logp, accept_mask\n\n @ torch.no_grad()\n def constant_reward(self, inputs, outputs, coord1, coord2, reward_thr, rescale_thr):\n coord1_h = homogenize(coord1).transpose(1, 2) #bx3xm\n coord2_h = homogenize(coord2).transpose(1, 2) #bx3xn\n fmatrix = inputs['F1']\n fmatrix2 = inputs['F2']\n\n # compute the distance of the points in the second image\n epipolar_line = fmatrix.bmm(coord1_h)\n epipolar_line_ = epipolar_line / torch.clamp(\n torch.norm(epipolar_line[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)\n epipolar_dist = torch.abs(epipolar_line_.transpose(1, 2)@coord2_h) #bxmxn\n\n # compute the distance of the points in the first image\n epipolar_line2 = fmatrix2.bmm(coord2_h)\n epipolar_line2_ = epipolar_line2 / torch.clamp(\n torch.norm(epipolar_line2[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)\n epipolar_dist2 = torch.abs(epipolar_line2_.transpose(1, 2)@coord1_h) #bxnxm\n epipolar_dist2 = epipolar_dist2.transpose(1,2) #bxmxn\n\n if rescale_thr:\n b, _, _ = epipolar_dist.shape\n dist1 = epipolar_dist.detach().reshape(b, -1).mean(1,True)\n dist2 = epipolar_dist2.detach().reshape(b,-1).mean(1,True)\n dist_ = torch.cat([dist1, dist2], dim=1)\n scale1 = dist1/dist_.min(1,True)[0].clamp(1e-6)\n scale2 = dist2/dist_.min(1,True)[0].clamp(1e-6)\n thr1 = reward_thr*scale1\n thr2 = reward_thr*scale2\n thr1 = thr1.reshape(b,1,1)\n thr2 = thr2.reshape(b,1,1)\n else:\n thr1 = reward_thr\n thr2 = reward_thr\n scale1 = epipolar_dist2.new_tensor(1.) 
\n scale2 = epipolar_dist2.new_tensor(1.) \n\n good = (epipolar_dist<thr1) & (epipolar_dist2<thr2)\n reward = self.good_reward*good + self.bad_reward*(~good)\n return reward, scale1, scale2\n\n @ torch.no_grad()\n def dynamic_reward(self, inputs, outputs, coord1, coord2, reward_thr, rescale_thr):\n coord1_h = homogenize(coord1).transpose(1, 2) #bx3xm\n coord2_h = homogenize(coord2).transpose(1, 2) #bx3xn\n fmatrix = inputs['F1']\n fmatrix2 = inputs['F2']\n\n # compute the distance of the points in the second image\n epipolar_line = fmatrix.bmm(coord1_h)\n epipolar_line_ = epipolar_line / torch.clamp(\n torch.norm(epipolar_line[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)\n epipolar_dist = torch.abs(epipolar_line_.transpose(1, 2)@coord2_h) #bxmxn\n\n # compute the distance of the points in the first image\n epipolar_line2 = fmatrix2.bmm(coord2_h)\n epipolar_line2_ = epipolar_line2 / torch.clamp(\n torch.norm(epipolar_line2[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)\n epipolar_dist2 = torch.abs(epipolar_line2_.transpose(1, 2)@coord1_h) #bxnxm\n epipolar_dist2 = epipolar_dist2.transpose(1,2) #bxmxn\n\n if rescale_thr:\n b, _, _ = epipolar_dist.shape\n dist1 = epipolar_dist.detach().reshape(b, -1).mean(1,True)\n dist2 = epipolar_dist2.detach().reshape(b,-1).mean(1,True)\n dist_ = torch.cat([dist1, dist2], dim=1)\n scale1 = dist1/dist_.min(1,True)[0].clamp(1e-6)\n scale2 = dist2/dist_.min(1,True)[0].clamp(1e-6)\n thr1 = reward_thr*scale1\n thr2 = reward_thr*scale2\n thr1 = thr1.reshape(b,1,1)\n thr2 = thr2.reshape(b,1,1)\n else:\n thr1 = reward_thr\n thr2 = reward_thr\n scale1 = epipolar_dist2.new_tensor(1.) \n scale2 = epipolar_dist2.new_tensor(1.) \n\n reward = torch.exp(-epipolar_dist/thr1) + torch.exp(-epipolar_dist2/thr2) - 2/torch.exp(torch.ones_like(epipolar_dist)).to(epipolar_dist)\n reward = reward.clamp(min=self.bad_reward)\n return reward, scale1, scale2\n\n def forward(self, inputs, outputs, processed):\n preds1 = outputs['preds1']\n preds2 = outputs['preds2']\n kp_map1, kp_map2 = preds1['local_point'], preds2['local_point']\n xf1, xf2 = preds1['local_map'], preds2['local_map']\n b,c,h4,w4 = xf1.shape\n _, _, h, w = kp_map1.shape\n temperature = min(self.t_base + outputs['epoch'], self.t_max)\n \n coord1, logp1, accept_mask1 = self.point_sample(kp_map1) # bx(h//g)x(w//g)x2 bx1x(h//g)x(w//g) bx1x(h//g)x(w//g)\n coord2, logp2, accept_mask2 = self.point_sample(kp_map2)\n coord1 = coord1.reshape(b,-1,2)\n coord2 = coord2.reshape(b,-1,2)\n\n coord1_n = normalize_coords(coord1, h, w) # bx((h//g)*(w//g))x2\n coord2_n = normalize_coords(coord2, h, w)\n\n # feat1 = F.grid_sample(xf1, coord1_n, align_corners=False).reshape(b,c,-1) # bxcx((h//g)*(w//g))\n # feat2 = F.grid_sample(xf2, coord2_n, align_corners=False).reshape(b,c,-1)\n feat1 = sample_feat_by_coord(xf1, coord1_n, self.config['loss_distance']=='cos') #bxmxc\n feat2 = sample_feat_by_coord(xf2, coord2_n, self.config['loss_distance']=='cos') #bxnxc\n\n # matching\n if self.config['match_grad']:\n costs = feat1@feat2.transpose(1,2) # bxmxn 0-2\n else:\n with torch.no_grad():\n costs = feat1@feat2.transpose(1,2) # bxmxn 0-2\n affinity = -temperature * costs\n\n cat_I = Categorical(logits=affinity)\n cat_T = Categorical(logits=affinity.transpose(1,2))\n\n dense_p = cat_I.probs * cat_T.probs.transpose(1,2)\n dense_logp = cat_I.logits + cat_T.logits.transpose(1,2)\n\n if self.config['cor_detach']:\n sample_p = dense_p.detach()\n else:\n sample_p = dense_p\n\n reward, scale1, scale2 = self.reward(inputs, outputs, coord1, coord2, 
**self.config['reward_config'])\n\n kps_logp = logp1.reshape(b,1,-1).transpose(1,2) + logp2.reshape(b,1,-1) # bxmxn\n sample_plogp = sample_p * (dense_logp + kps_logp)\n accept_mask = accept_mask1.reshape(b,1,-1).transpose(1,2) * accept_mask2.reshape(b,1,-1) # bxmxn\n\n reinforce = (reward[accept_mask] * sample_plogp[accept_mask]).sum()\n kp_penalty = self.kp_penalty * (logp1[accept_mask1].sum()+logp2[accept_mask2].sum())\n\n loss = -reinforce - kp_penalty\n\n sample_p_detach = sample_p.detach()\n components = {'reinforce':reinforce.detach(), 'kp_penalty': kp_penalty.detach(), \n 'scale1': scale1, 'scale2':scale2, \n 'cor minmax': sample_p_detach.view(b,-1).max(-1)[0].min(), \n 'cor minmean': sample_p_detach.view(b,-1).mean(-1).min(), \n 'cor max': sample_p_detach.max(), \n 'cor mean': sample_p_detach.mean(), \n 'cor summin': torch.min(sample_p_detach.sum(1).min(), sample_p_detach.sum(2).min()), \n 'cor summax': torch.max(sample_p_detach.sum(1).max(), sample_p_detach.sum(2).max()),\n 'n_kps': (accept_mask1.detach().reshape(b,1,-1).sum(-1) + accept_mask2.detach().reshape(b,1,-1).sum(-1)).float().mean(),\n 'n_pairs': sample_p.detach().sum(-1).sum(-1).mean(),\n 'temperature': sample_p_detach.new_tensor(temperature)\n }\n return loss, components"
] | [
[
"torch.cat",
"torch.distributions.Categorical",
"torch.gather",
"torch.norm",
"torch.no_grad",
"torch.distributions.Bernoulli",
"torch.ones_like",
"torch.exp"
]
] |
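For orientation, a minimal sketch of the propose-then-accept sampling that DiskLoss.point_distribution above implements: a Categorical proposal picks one of the g*g candidate pixels per grid cell, then a Bernoulli on the chosen pixel's logit accepts or rejects it. This uses plain torch only; the logits are random stand-ins, and none of the repo's preprocess_utils helpers are assumed.

    import torch
    from torch.distributions import Categorical, Bernoulli

    logits = torch.randn(2, 1, 4, 4, 9)                # b=2, 4x4 grid cells, g*g=9 candidates per cell

    # categorical proposal: one candidate pixel per grid cell
    proposal_dist = Categorical(logits=logits)
    proposals = proposal_dist.sample()                  # 2x1x4x4
    proposal_logp = proposal_dist.log_prob(proposals)   # 2x1x4x4

    # the chosen candidate's logit doubles as the acceptance logit
    accept_logits = torch.gather(logits, -1, proposals.unsqueeze(-1)).squeeze(-1)
    accept_dist = Bernoulli(logits=accept_logits)
    accept_samples = accept_dist.sample()
    accept_logp = accept_dist.log_prob(accept_samples)

    logp = proposal_logp + accept_logp                  # joint log-prob used by REINFORCE
    accept_mask = accept_samples == 1.
    print(proposals.shape, logp.shape, int(accept_mask.sum()))

This joint log-probability is what forward() above multiplies by the (non-differentiable) epipolar reward, so only accepted keypoints contribute gradient.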
truher/FRC2022 | [
"fb8a39d0212e8460669feb282f8b9977f17fdd93"
] | [
"simulator/firing_models/fit_lm.py"
] | [
"# use lmfit to extract some parameters from the simulated data, old-school.\n#\n# note:\n# * i have no model for p(hit). bleah\n# * i'm also not using the precision values, which seems wrong.\n\nfrom typing import Any\nimport pandas as pd # type:ignore\nimport matplotlib.pyplot as plt # type:ignore\nimport numpy as np\nfrom lmfit import Model # type:ignore\n\ndef plot_results(bff: Any, obs: str, pred: str, lim: float, measure: str) -> None:\n    # multiplot, range precision x gun precision\n    urp = pd.unique(bff['rp'])\n    ugp = pd.unique(bff['gp'])\n    _, axs = plt.subplots(len(urp), len(ugp))\n    for s1 in range(len(urp)):\n        for s2 in range(len(ugp)):\n            rp = urp[s1]\n            gp = ugp[s2]\n            filtered = bff.loc[(bff['gp']==gp) & (bff['rp']==rp)]\n            axis = axs if len(urp) == 1 and len(ugp) == 1 else axs[s1,s2]\n            axis.scatter(filtered['r'],filtered[obs], label=f\"r{rp} g{gp}\")\n            axis.plot(filtered['r'],filtered[pred], color=\"red\")\n            axis.set_xlim((0,9))\n            axis.set_ylim((0,lim))\n            axis.set_xlabel(\"actual range (m)\")\n            axis.set_ylabel(measure)\n            axis.set_title(f\"range precision {rp} gun precision {gp}\")\n    plt.tight_layout(pad=0)\n    plt.show()\n\ndef f_velocity(r, gp, rp, a_v=1, b_v=5, c_v=1, a_gp=100, b_gp=0, a_rp=100, b_rp=0):\n    #return (a_v*r + b_v + c_v/r) * (a_gp * gp + b_gp) * (a_rp * rp + b_rp) # multiplicative errors?\n    return a_v * r + b_v + c_v / r\n\ndef f_elevation(r, gp, rp, a_e=100, b_e=-1, a_gp=100, b_gp=0, a_rp=100, b_rp=0):\n    #return a_e*pow(r,b_e) * (a_gp * gp + b_gp) * (a_rp * rp + b_rp) # multiplicative errors?\n    return a_e * pow(r, b_e)\n\ndef fake_data():\n    r = []\n    rp = []\n    gp = []\n    velocity=[]\n    elevation=[]\n    for r_i in np.arange(1.0, 9.0, 1.0):\n        for rp_i in [0.01, 0.05, 0.1]:\n            for gp_i in [0.01, 0.05, 0.1]:\n                r.append(r_i)\n                rp.append(rp_i)\n                gp.append(gp_i)\n                velocity.append(f_velocity(r_i, gp_i, rp_i, 0.8, 4.6, 0.9) * np.random.normal(1.0, 0.1))\n                elevation.append(f_elevation(r_i, gp_i, rp_i, 84, -0.3) * np.random.normal(1.0, 0.1))\n    return pd.DataFrame({ 'r': r, 'gp': gp, 'rp': rp, 'v': velocity, 'l': elevation})\n\n#df = fake_data()\ndf = pd.read_csv('new_results.csv')\n\n# two independent models\nvelocity_model = Model(f_velocity, independent_vars=['r','gp','rp'])\nelevation_model = Model(f_elevation, independent_vars=['r','gp','rp'])\n\nvelocity_fit = velocity_model.fit(df['v'], r = df['r'], gp = df['gp'], rp = df['rp'])\nelevation_fit = elevation_model.fit(df['l'], r = df['r'], gp = df['gp'], rp = df['rp'])\n\nprint(velocity_fit.fit_report())\nprint(elevation_fit.fit_report())\nfor key in velocity_fit.params:\n    print(f\"{key} = {velocity_fit.params[key].value} +/- {velocity_fit.params[key].stderr}\")\nfor key in elevation_fit.params:\n    print(f\"{key} = {elevation_fit.params[key].value} +/- {elevation_fit.params[key].stderr}\")\n\n\ndf['pred_v'] = [\n    f_velocity(r_i, gp_i, rp_i, velocity_fit.params['a_v'].value, velocity_fit.params['b_v'].value, velocity_fit.params['c_v'].value)\n    for r_i, gp_i, rp_i in zip(df['r'], df['gp'], df['rp'])\n]\ndf['pred_l'] = [\n    f_elevation(r_i, gp_i, rp_i, elevation_fit.params['a_e'].value, elevation_fit.params['b_e'].value)\n    for r_i, gp_i, rp_i in zip(df['r'], df['gp'], df['rp'])\n]\n\nplot_results(df, 'v', 'pred_v', 15, \"velocity\")\nplot_results(df, 'l', 'pred_l', 90, \"elevation\")\n"
] | [
[
"numpy.random.normal",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"pandas.unique",
"pandas.read_csv"
]
] |
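A toy illustration of the lmfit pattern fit_lm.py leans on: function arguments not listed in independent_vars become fit parameters, with their defaults taken as initial guesses. The data here is synthetic; the repo's new_results.csv is not assumed.

    import numpy as np
    from lmfit import Model

    def f_line(r, a=1.0, b=0.0, c=0.0):
        # same shape as f_velocity above: linear plus an inverse-range term
        return a * r + b + c / r

    rng = np.random.default_rng(0)
    r = np.linspace(1.0, 9.0, 50)
    y = f_line(r, 0.8, 4.6, 0.9) * rng.normal(1.0, 0.05, r.size)

    model = Model(f_line, independent_vars=['r'])
    result = model.fit(y, r=r)              # initial parameter values come from the defaults
    print(result.fit_report())
    for name, par in result.params.items():
        print(f"{name} = {par.value:.3f} +/- {par.stderr}")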
WillTirone/stats_tools | [
"46f7d203f23049dbb77e0a9f5a3a28a54eec0521"
] | [
"tests/test.py"
] | [
"import unittest\nimport math as m\nimport sys\nimport os\n\nimport numpy as np\nfrom scipy import integrate\nfrom scipy.special import beta\n\n#only necessary while the module is not installed in pckgs\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom applied_stats import continuous_distributions as stats\nfrom applied_stats import mle\n\n\n# defining a random array and sample size for reproducible testing\nrng = np.random.default_rng(1905)\nX_continuous = rng.random(100) * 100\nX_discrete = np.round(X_continuous, 0)\nn = len(X_continuous)\n\n# testing distribution calculations / attributes / methods\nclass Test_Distributions(unittest.TestCase):\n\n def test_norm(self):\n\n #test an instance\n a = stats.Norm_rv(0,1)\n self.assertIsInstance(a, stats.Norm_rv)\n\n #test the probability calculation\n a.probability_calc()\n self.assertAlmostEqual(a.left_probability, 0.5)\n\n #test that it is a pdf by integrating, it must = 1\n f = lambda x: ((1/(a.sigma*m.sqrt(2*m.pi)))*\n m.e**((-1/2)*((x-a.mean)/a.sigma)**2))\n a.probability, a.error_est = integrate.quad(f,-np.inf, np.inf)\n self.assertAlmostEqual(a.probability, 1)\n\n #testing attributes\n self.assertEqual(a.mean,0)\n self.assertEqual(a.variance,1)\n self.assertTrue(a.variance < np.infty)\n \n #two tailed test \n a_two_tail = stats.Norm_rv(10,4,left_cv=7, right_cv=13)\n a_two_tail.probability_calc(two_tail=True, cv_probability=True)\n self.assertAlmostEqual(round(a_two_tail.total_probability,5), 0.13361)\n \n def test_chisq(self):\n\n #test an instance\n b = stats.ChiSq_rv(4)\n self.assertIsInstance(b, stats.ChiSq_rv)\n\n #test the probability calculation\n #will need to adjust for two tail\n b.probability_calc()\n self.assertAlmostEqual(round(b.right_probability,5), .40601)\n\n #test that it is a pdf by integrating, it must = 1\n #TODO: rewrite to use function from .py file\n f = lambda x: ((1/(m.gamma(b.df/2)*2**(b.df/2)))\n *x**((b.df/2)-1)*m.e**(-x/2))\n b.probability, b.error_est = integrate.quad(f,0,np.inf)\n self.assertAlmostEqual(b.probability, 1)\n\n #test some attributes\n self.assertEqual(b.df, 4)\n self.assertEqual(b.mean, 4)\n self.assertEqual(b.variance, 8)\n\n #TODO: add a two tailed test case\n\n def test_t(self):\n\n #test an instance\n c = stats.t_rv(5,crit_value=1)\n self.assertIsInstance(c, stats.t_rv)\n\n #test the probability calculation\n c.probability_calc()\n self.assertAlmostEqual(round(c.probability,5), 0.18161)\n\n #test that it is a pdf by integrating, it must = 1\n f = lambda x: (m.gamma((c.df+1)/2) / (m.sqrt(m.pi * c.df) *\n m.gamma(c.df / 2) * (1 + ((x**2)/c.df))\n **((c.df + 1) / 2)))\n c.probability, c.error_est = integrate.quad(f,-np.inf,np.inf)\n self.assertAlmostEqual(c.probability, 1)\n\n #test some attributes\n self.assertEqual(c.df, 5)\n self.assertEqual(c.mean, 0)\n self.assertEqual(c.variance, 5/3)\n\n def test_F(self):\n\n #test an instance\n d = stats.F_rv(5, 5)\n self.assertIsInstance(d, stats.F_rv)\n\n\n #test the probability calculation\n d.probability_calc()\n self.assertAlmostEqual(round(d.right_probability,2), 0.5)\n\n #test that it is a pdf by integrating, it must = 1\n f = lambda x: ((d.v_2**(d.v_2/2) * d.v_1**(d.v_1/2) *\n x**(d.v_1/2 -1))/\n ((d.v_2 +d.v_1*x)**((d.v_1 + d.v_2)/2) *\n beta(d.v_1/2, d.v_2/2)))\n d.probability, d.error_est = integrate.quad(f,0,np.inf)\n self.assertAlmostEqual(d.probability, 1)\n\n #test some attributes\n self.assertEqual(d.v_1, 5)\n self.assertEqual(d.v_2, 5)\n self.assertEqual(round(d.mean,3), 1.667)\n 
self.assertEqual(round(d.variance,3), 30.769)\n\n# testing the MLE module to ensure accurate calculations\nclass Test_MLE(unittest.TestCase):\n\n #continuous distributions\n\n def test_uniform(self):\n\n a, e = mle.uniform(X_continuous)\n self.assertEqual(round(a,4), 0.0735) #alpha\n self.assertEqual(round(e,4), 99.0877) #beta\n\n def test_exponential(self):\n\n b = round(mle.exponential(X_continuous),4)\n self.assertEqual(b, 52.1989) #theta\n\n def test_normal(self):\n\n c, d = mle.normal(X_continuous)\n self.assertEqual(round(c,4), 52.1989) #mu\n self.assertEqual(round(d,4), 747.7962) #sigma^2\n\n# discrete distributions\n\n def test_binomial(self):\n b = mle.binomial(100, X_discrete)\n self.assertEqual(round(b,4), 0.5222) #p-hat\n\n def test_geometric(self):\n c = round(mle.geometric(X_discrete),5)\n self.assertEqual(c, 0.01915)#p-hat\n\n def test_poisson(self):\n d = mle.poisson(X_discrete)\n self.assertEqual(d, 52.22)#lambda-hat\n\nclass Test_Hypotheses(unittest.TestCase):\n \n def test_gen_test(self):\n pass\n \nclass Test_conf_intervals(unittest.TestCase):\n \n def test_norm_ci(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.round",
"scipy.special.beta",
"numpy.random.default_rng",
"scipy.integrate.quad"
]
] |
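The recurring assertion in these tests, isolated: a candidate density must integrate to 1 over its support. A standalone sketch for the standard normal, using only scipy/numpy (the repo's applied_stats module is not assumed):

    import math as m
    import numpy as np
    from scipy import integrate

    mean, sigma = 0.0, 1.0
    f = lambda x: (1 / (sigma * m.sqrt(2 * m.pi))) * m.e ** ((-1 / 2) * ((x - mean) / sigma) ** 2)
    total, err_est = integrate.quad(f, -np.inf, np.inf)  # integrate the pdf over the real line
    assert abs(total - 1) < 1e-8, (total, err_est)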
arfon/astropy-lambda | [
"76ad397bb678b6649f8c76ff3665623dc1ef64b3"
] | [
"astropy/table/column.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom ..extern import six\nfrom ..extern.six.moves import zip\n\nimport warnings\nimport weakref\nimport re\n\nfrom copy import deepcopy\n\nimport numpy as np\nfrom numpy import ma\n\n# Remove this when Numpy no longer emits this warning and that Numpy version\n# becomes the minimum required version for Astropy.\n# https://github.com/astropy/astropy/issues/6285\ntry:\n from numpy.ma.core import MaskedArrayFutureWarning\nexcept ImportError:\n # For Numpy versions that do not raise this warning.\n MaskedArrayFutureWarning = None\n\nfrom ..units import Unit, Quantity\nfrom ..utils.console import color_print\nfrom ..utils.metadata import MetaData\nfrom ..utils.data_info import BaseColumnInfo, dtype_info_name\nfrom ..utils.misc import dtype_bytes_or_chars\nfrom ..extern.six.moves import range\nfrom . import groups\nfrom . import pprint\nfrom .np_utils import fix_column_name\n\n# These \"shims\" provide __getitem__ implementations for Column and MaskedColumn\nfrom ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim\n\n# Create a generic TableFormatter object for use by bare columns with no\n# parent table.\nFORMATTER = pprint.TableFormatter()\n\n\nclass StringTruncateWarning(UserWarning):\n \"\"\"\n Warning class for when a string column is assigned a value\n that gets truncated because the base (numpy) string length\n is too short.\n\n This does not inherit from AstropyWarning because we want to use\n stacklevel=2 to show the user where the issue occurred in their code.\n \"\"\"\n pass\n\n\n# Always emit this warning, not just the first instance\nwarnings.simplefilter('always', StringTruncateWarning)\n\n\ndef _auto_names(n_cols):\n from . import conf\n return [str(conf.auto_colname).format(i) for i in range(n_cols)]\n\n\n# list of one and two-dimensional comparison functions, which sometimes return\n# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure\n# they only return plain (masked) arrays (see #1446 and #1685)\n_comparison_functions = set(\n [np.greater, np.greater_equal, np.less, np.less_equal,\n np.not_equal, np.equal,\n np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])\n\n\ndef col_copy(col, copy_indices=True):\n \"\"\"\n Mixin-safe version of Column.copy() (with copy_data=True).\n\n Parameters\n ----------\n col : Column or mixin column\n Input column\n copy_indices : bool\n Copy the column ``indices`` attribute\n\n Returns\n -------\n col : Copy of input column\n \"\"\"\n if isinstance(col, BaseColumn):\n return col.copy()\n\n # The new column should have None for the parent_table ref. If the\n # original parent_table weakref there at the point of copying then it\n # generates an infinite recursion. 
Instead temporarily remove the weakref\n # on the original column and restore after the copy in an exception-safe\n # manner.\n\n parent_table = col.info.parent_table\n indices = col.info.indices\n col.info.parent_table = None\n col.info.indices = []\n\n try:\n newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)\n newcol.info = col.info\n newcol.info.indices = deepcopy(indices or []) if copy_indices else []\n for index in newcol.info.indices:\n index.replace_col(col, newcol)\n finally:\n col.info.parent_table = parent_table\n col.info.indices = indices\n\n return newcol\n\n\nclass FalseArray(np.ndarray):\n \"\"\"\n Boolean mask array that is always False.\n\n This is used to create a stub ``mask`` property which is a boolean array of\n ``False`` used by default for mixin columns and corresponding to the mixin\n column data shape. The ``mask`` looks like a normal numpy array but an\n exception will be raised if ``True`` is assigned to any element. The\n consequences of the limitation are most obvious in the high-level table\n operations.\n\n Parameters\n ----------\n shape : tuple\n Data shape\n \"\"\"\n def __new__(cls, shape):\n obj = np.zeros(shape, dtype=np.bool).view(cls)\n return obj\n\n def __setitem__(self, item, val):\n val = np.asarray(val)\n if np.any(val):\n raise ValueError('Cannot set any element of {0} class to True'\n .format(self.__class__.__name__))\n\n if six.PY2: # avoid falling back to ndarray.__setslice__\n def __setslice__(self, start, stop, val):\n self.__setitem__(slice(start, stop), val)\n\n\nclass ColumnInfo(BaseColumnInfo):\n \"\"\"\n Container for meta information like name, description, format.\n\n This is required when the object is used as a mixin column within a table,\n but can be used as a general way to store meta information.\n \"\"\"\n attrs_from_parent = BaseColumnInfo.attr_names\n _supports_indexing = True\n\n def new_like(self, cols, length, metadata_conflicts='warn', name=None):\n \"\"\"\n Return a new Column instance which is consistent with the\n input ``cols`` and has ``length`` rows.\n\n This is intended for creating an empty column object whose elements can\n be set in-place for table operations like join or vstack.\n\n Parameters\n ----------\n cols : list\n List of input columns\n length : int\n Length of the output column object\n metadata_conflicts : str ('warn'|'error'|'silent')\n How to handle metadata conflicts\n name : str\n Output column name\n\n Returns\n -------\n col : Column (or subclass)\n New instance of this class consistent with ``cols``\n\n \"\"\"\n attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,\n ('meta', 'unit', 'format', 'description'))\n\n return self._parent_cls(length=length, **attrs)\n\n\nclass BaseColumn(_ColumnGetitemShim, np.ndarray):\n\n meta = MetaData()\n\n def __new__(cls, data=None, name=None,\n dtype=None, shape=(), length=0,\n description=None, unit=None, format=None, meta=None,\n copy=False, copy_indices=True):\n if data is None:\n dtype = (np.dtype(dtype).str, shape)\n self_data = np.zeros(length, dtype=dtype)\n elif isinstance(data, BaseColumn) and hasattr(data, '_name'):\n # When unpickling a MaskedColumn, ``data`` will be a bare\n # BaseColumn with none of the expected attributes. 
In this case\n # do NOT execute this block which initializes from ``data``\n # attributes.\n self_data = np.array(data.data, dtype=dtype, copy=copy)\n if description is None:\n description = data.description\n if unit is None:\n unit = unit or data.unit\n if format is None:\n format = data.format\n if meta is None:\n meta = deepcopy(data.meta)\n if name is None:\n name = data.name\n elif isinstance(data, Quantity):\n if unit is None:\n self_data = np.array(data, dtype=dtype, copy=copy)\n unit = data.unit\n else:\n self_data = np.array(data.to(unit), dtype=dtype, copy=copy)\n if description is None:\n description = data.info.description\n if format is None:\n format = data.info.format\n if meta is None:\n meta = deepcopy(data.info.meta)\n\n else:\n if not six.PY2 and np.dtype(dtype).char == 'S':\n data = cls._encode_str(data)\n self_data = np.array(data, dtype=dtype, copy=copy)\n\n self = self_data.view(cls)\n self._name = fix_column_name(name)\n self.unit = unit\n self.format = format\n self.description = description\n self.meta = meta\n self._parent_table = None\n self.indices = deepcopy(getattr(data, 'indices', [])) if \\\n copy_indices else []\n for index in self.indices:\n index.replace_col(data, self)\n\n return self\n\n @property\n def data(self):\n return self.view(np.ndarray)\n\n @property\n def parent_table(self):\n if self._parent_table is None:\n return None\n else:\n return self._parent_table()\n\n @parent_table.setter\n def parent_table(self, table):\n if table is None:\n self._parent_table = None\n else:\n self._parent_table = weakref.ref(table)\n\n info = ColumnInfo()\n\n def copy(self, order='C', data=None, copy_data=True):\n \"\"\"\n Return a copy of the current instance.\n\n If ``data`` is supplied then a view (reference) of ``data`` is used,\n and ``copy_data`` is ignored.\n\n Parameters\n ----------\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout of the copy. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of ``a`` as closely\n as possible. (Note that this function and :func:numpy.copy are very\n similar, but have different default values for their order=\n arguments.) Default is 'C'.\n data : array, optional\n If supplied then use a view of ``data`` instead of the instance\n data. This allows copying the instance attributes and meta.\n copy_data : bool, optional\n Make a copy of the internal numpy array instead of using a\n reference. Default is True.\n\n Returns\n -------\n col : Column or MaskedColumn\n Copy of the current column (same type as original)\n \"\"\"\n if data is None:\n data = self.data\n if copy_data:\n data = data.copy(order)\n\n out = data.view(self.__class__)\n out.__array_finalize__(self)\n # for MaskedColumn, MaskedArray.__array_finalize__ also copies mask\n # from self, which is not the idea here, so undo\n if isinstance(self, MaskedColumn):\n out._mask = data._mask\n\n self._copy_groups(out)\n\n return out\n\n def __setstate__(self, state):\n \"\"\"\n Restore the internal state of the Column/MaskedColumn for pickling\n purposes. This requires that the last element of ``state`` is a\n 5-tuple that has Column-specific state values.\n \"\"\"\n # Get the Column attributes\n names = ('_name', 'unit', 'format', 'description', 'meta', 'indices')\n attrs = {name: val for name, val in zip(names, state[-1])}\n\n state = state[:-1]\n\n # Using super(type(self), self).__setstate__() gives an infinite\n # recursion. 
Manually call the right super class to actually set up\n # the array object.\n super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray\n super_class.__setstate__(self, state)\n\n # Set the Column attributes\n for name, val in attrs.items():\n setattr(self, name, val)\n self._parent_table = None\n\n def __reduce__(self):\n \"\"\"\n Return a 3-tuple for pickling a Column. Use the super-class\n functionality but then add in a 5-tuple of Column-specific values\n that get used in __setstate__.\n \"\"\"\n super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray\n reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)\n\n # Define Column-specific attrs and meta that gets added to state.\n column_state = (self.name, self.unit, self.format, self.description,\n self.meta, self.indices)\n state = state + (column_state,)\n\n return reconstruct_func, reconstruct_func_args, state\n\n def __array_finalize__(self, obj):\n # Obj will be none for direct call to Column() creator\n if obj is None:\n return\n\n if six.callable(super(BaseColumn, self).__array_finalize__):\n super(BaseColumn, self).__array_finalize__(obj)\n\n # Self was created from template (e.g. obj[slice] or (obj * 2))\n # or viewcast e.g. obj.view(Column). In either case we want to\n # init Column attributes for self from obj if possible.\n self.parent_table = None\n if not hasattr(self, 'indices'): # may have been copied in __new__\n self.indices = []\n self._copy_attrs(obj)\n\n def __array_wrap__(self, out_arr, context=None):\n \"\"\"\n __array_wrap__ is called at the end of every ufunc.\n\n Normally, we want a Column object back and do not have to do anything\n special. But there are two exceptions:\n\n 1) If the output shape is different (e.g. for reduction ufuncs\n like sum() or mean()), a Column still linking to a parent_table\n makes little sense, so we return the output viewed as the\n column content (ndarray or MaskedArray).\n For this case, we use \"[()]\" to select everything, and to ensure we\n convert a zero rank array to a scalar. 
(For some reason np.sum()\n returns a zero rank scalar array while np.mean() returns a scalar;\n So the [()] is needed for this case.\n\n 2) When the output is created by any function that returns a boolean\n we also want to consistently return an array rather than a column\n (see #1446 and #1685)\n \"\"\"\n out_arr = super(BaseColumn, self).__array_wrap__(out_arr, context)\n if (self.shape != out_arr.shape or\n (isinstance(out_arr, BaseColumn) and\n (context is not None and context[0] in _comparison_functions))):\n return out_arr.data[()]\n else:\n return out_arr\n\n @property\n def name(self):\n \"\"\"\n The name of this column.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, val):\n val = fix_column_name(val)\n\n if self.parent_table is not None:\n table = self.parent_table\n table.columns._rename_column(self.name, val)\n\n self._name = val\n\n @property\n def descr(self):\n \"\"\"Array-interface compliant full description of the column.\n\n This returns a 3-tuple (name, type, shape) that can always be\n used in a structured array dtype definition.\n \"\"\"\n return (self.name, self.dtype.str, self.shape[1:])\n\n def iter_str_vals(self):\n \"\"\"\n Return an iterator that yields the string-formatted values of this\n column.\n\n Returns\n -------\n str_vals : iterator\n Column values formatted as strings\n \"\"\"\n # Iterate over formatted values with no max number of lines, no column\n # name, no unit, and ignoring the returned header info in outs.\n _pformat_col_iter = self._formatter._pformat_col_iter\n for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,\n show_dtype=False, outs={}):\n yield str_val\n\n def attrs_equal(self, col):\n \"\"\"Compare the column attributes of ``col`` to this object.\n\n The comparison attributes are: ``name``, ``unit``, ``dtype``,\n ``format``, ``description``, and ``meta``.\n\n Parameters\n ----------\n col : Column\n Comparison column\n\n Returns\n -------\n equal : boolean\n True if all attributes are equal\n \"\"\"\n if not isinstance(col, BaseColumn):\n raise ValueError('Comparison `col` must be a Column or '\n 'MaskedColumn object')\n\n attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')\n equal = all(getattr(self, x) == getattr(col, x) for x in attrs)\n\n return equal\n\n @property\n def _formatter(self):\n return FORMATTER if (self.parent_table is None) else self.parent_table.formatter\n\n def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,\n html=False):\n \"\"\"Return a list of formatted string representation of column values.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default will be\n determined using the ``astropy.conf.max_lines`` configuration\n item. If a negative value of ``max_lines`` is supplied then\n there is no line limit applied.\n\n Parameters\n ----------\n max_lines : int\n Maximum lines of output (header + data rows)\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is False.\n\n show_dtype : bool\n Include column dtype. Default is False.\n\n html : bool\n Format the output as an HTML table. 
Default is False.\n\n Returns\n -------\n lines : list\n List of lines with header and formatted column values\n\n \"\"\"\n _pformat_col = self._formatter._pformat_col\n lines, outs = _pformat_col(self, max_lines, show_name=show_name,\n show_unit=show_unit, show_dtype=show_dtype,\n html=html)\n return lines\n\n def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):\n \"\"\"Print a formatted string representation of column values.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default will be\n determined using the ``astropy.conf.max_lines`` configuration\n item. If a negative value of ``max_lines`` is supplied then\n there is no line limit applied.\n\n Parameters\n ----------\n max_lines : int\n Maximum number of values in output\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is False.\n\n show_dtype : bool\n Include column dtype. Default is True.\n \"\"\"\n _pformat_col = self._formatter._pformat_col\n lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,\n show_dtype=show_dtype)\n\n n_header = outs['n_header']\n for i, line in enumerate(lines):\n if i < n_header:\n color_print(line, 'red')\n else:\n print(line)\n\n def more(self, max_lines=None, show_name=True, show_unit=False):\n \"\"\"Interactively browse column with a paging interface.\n\n Supported keys::\n\n f, <space> : forward one page\n b : back one page\n r : refresh same page\n n : next row\n p : previous row\n < : go to beginning\n > : go to end\n q : quit browsing\n h : print this help\n\n Parameters\n ----------\n max_lines : int\n Maximum number of lines in table output.\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is False.\n\n \"\"\"\n _more_tabcol = self._formatter._more_tabcol\n _more_tabcol(self, max_lines=max_lines, show_name=show_name,\n show_unit=show_unit)\n\n @property\n def unit(self):\n \"\"\"\n The unit associated with this column. May be a string or a\n `astropy.units.UnitBase` instance.\n\n Setting the ``unit`` property does not change the values of the\n data. To perform a unit conversion, use ``convert_unit_to``.\n \"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n else:\n self._unit = Unit(unit, parse_strict='silent')\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n def convert_unit_to(self, new_unit, equivalencies=[]):\n \"\"\"\n Converts the values of the column in-place from the current\n unit to the given unit.\n\n To change the unit associated with this column without\n actually changing the data values, simply set the ``unit``\n property.\n\n Parameters\n ----------\n new_unit : str or `astropy.units.UnitBase` instance\n The unit to convert to.\n\n equivalencies : list of equivalence pairs, optional\n A list of equivalence pairs to try if the unit are not\n directly convertible. 
See :ref:`unit_equivalencies`.\n\n Raises\n ------\n astropy.units.UnitsError\n If units are inconsistent\n \"\"\"\n if self.unit is None:\n raise ValueError(\"No unit set on column\")\n self.data[:] = self.unit.to(\n new_unit, self.data, equivalencies=equivalencies)\n self.unit = new_unit\n\n @property\n def groups(self):\n if not hasattr(self, '_groups'):\n self._groups = groups.ColumnGroups(self)\n return self._groups\n\n def group_by(self, keys):\n \"\"\"\n Group this column by the specified ``keys``\n\n This effectively splits the column into groups which correspond to\n unique values of the ``keys`` grouping object. The output is a new\n `Column` or `MaskedColumn` which contains a copy of this column but\n sorted by row according to ``keys``.\n\n The ``keys`` input to ``group_by`` must be a numpy array with the\n same length as this column.\n\n Parameters\n ----------\n keys : numpy array\n Key grouping object\n\n Returns\n -------\n out : Column\n New column with groups attribute set accordingly\n \"\"\"\n return groups.column_group_by(self, keys)\n\n def _copy_groups(self, out):\n \"\"\"\n Copy current groups into a copy of self ``out``\n \"\"\"\n if self.parent_table:\n if hasattr(self.parent_table, '_groups'):\n out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)\n elif hasattr(self, '_groups'):\n out._groups = groups.ColumnGroups(out, indices=self._groups._indices)\n\n # Strip off the BaseColumn-ness for repr and str so that\n # MaskedColumn.data __repr__ does not include masked_BaseColumn(data =\n # [1 2], ...).\n def __repr__(self):\n return np.asarray(self).__repr__()\n\n @property\n def quantity(self):\n \"\"\"\n A view of this table column as a `~astropy.units.Quantity` object with\n units given by the Column's `unit` parameter.\n \"\"\"\n # the Quantity initializer is used here because it correctly fails\n # if the column's values are non-numeric (like strings), while .view\n # will happily return a quantity with gibberish for numerical values\n return Quantity(self, copy=False, dtype=self.dtype, order='A')\n\n def to(self, unit, equivalencies=[], **kwargs):\n \"\"\"\n Converts this table column to a `~astropy.units.Quantity` object with\n the requested units.\n\n Parameters\n ----------\n unit : `~astropy.units.Unit` or str\n The unit to convert to (i.e., a valid argument to the\n :meth:`astropy.units.Quantity.to` method).\n equivalencies : list of equivalence pairs, optional\n Equivalencies to use for this conversion. See\n :meth:`astropy.units.Quantity.to` for more details.\n\n Returns\n -------\n quantity : `~astropy.units.Quantity`\n A quantity object with the contents of this column in the units\n ``unit``.\n \"\"\"\n return self.quantity.to(unit, equivalencies)\n\n def _copy_attrs(self, obj):\n \"\"\"\n Copy key column attributes from ``obj`` to self\n \"\"\"\n for attr in ('name', 'unit', 'format', 'description'):\n val = getattr(obj, attr, None)\n setattr(self, attr, val)\n self.meta = deepcopy(getattr(obj, 'meta', {}))\n\n @staticmethod\n def _encode_str(value):\n \"\"\"\n Encode anything that is unicode-ish as utf-8. 
This method is only\n called for Py3+.\n \"\"\"\n if isinstance(value, str):\n value = value.encode('utf-8')\n elif isinstance(value, bytes) or value is np.ma.masked:\n pass\n else:\n arr = np.asarray(value)\n if arr.dtype.char == 'U':\n arr = np.char.encode(arr, encoding='utf-8')\n if isinstance(value, np.ma.MaskedArray):\n arr = np.ma.array(arr, mask=value.mask, copy=False)\n value = arr\n\n return value\n\n\nclass Column(BaseColumn):\n \"\"\"Define a data column for use in a Table object.\n\n Parameters\n ----------\n data : list, ndarray or None\n Column data values\n name : str\n Column name and key for reference within Table\n dtype : numpy.dtype compatible value\n Data type for column\n shape : tuple or ()\n Dimensions of a single row element in the column data\n length : int or 0\n Number of row elements in column data\n description : str or None\n Full description of column\n unit : str or None\n Physical unit\n format : str or None or function or callable\n Format string for outputting column values. This can be an\n \"old-style\" (``format % value``) or \"new-style\" (`str.format`)\n format specification string or a function or any callable object that\n accepts a single value and returns a string.\n meta : dict-like or None\n Meta-data associated with the column\n\n Examples\n --------\n A Column can be created in two different ways:\n\n - Provide a ``data`` value but not ``shape`` or ``length`` (which are\n inferred from the data).\n\n Examples::\n\n col = Column(data=[1, 2], name='name') # shape=(2,)\n col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)\n col = Column(data=[1, 2], name='name', dtype=float)\n col = Column(data=np.array([1, 2]), name='name')\n col = Column(data=['hello', 'world'], name='name')\n\n The ``dtype`` argument can be any value which is an acceptable\n fixed-size data-type initializer for the numpy.dtype() method. See\n `<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.\n Examples include:\n\n - Python non-string type (float, int, bool)\n - Numpy non-string type (e.g. np.float32, np.int64, np.bool)\n - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')\n\n If no ``dtype`` value is provide then the type is inferred using\n ``np.array(data)``.\n\n - Provide ``length`` and optionally ``shape``, but not ``data``\n\n Examples::\n\n col = Column(name='name', length=5)\n col = Column(name='name', dtype=int, length=10, shape=(3,4))\n\n The default ``dtype`` is ``np.float64``. 
The ``shape`` argument is the\n array shape of a single cell in the column.\n \"\"\"\n\n def __new__(cls, data=None, name=None,\n dtype=None, shape=(), length=0,\n description=None, unit=None, format=None, meta=None,\n copy=False, copy_indices=True):\n\n if isinstance(data, MaskedColumn) and np.any(data.mask):\n raise TypeError(\"Cannot convert a MaskedColumn with masked value to a Column\")\n\n self = super(Column, cls).__new__(cls, data=data, name=name, dtype=dtype,\n shape=shape, length=length, description=description,\n unit=unit, format=format, meta=meta,\n copy=copy, copy_indices=copy_indices)\n return self\n\n def __setattr__(self, item, value):\n if not isinstance(self, MaskedColumn) and item == \"mask\":\n raise AttributeError(\"cannot set mask value to a column in non-masked Table\")\n super(Column, self).__setattr__(item, value)\n\n if item == 'unit' and issubclass(self.dtype.type, np.number):\n try:\n converted = self.parent_table._convert_col_for_table(self)\n except AttributeError: # Either no parent table or parent table is None\n pass\n else:\n if converted is not self:\n self.parent_table.replace_column(self.name, converted)\n\n def _base_repr_(self, html=False):\n # If scalar then just convert to correct numpy type and use numpy repr\n if self.ndim == 0:\n return repr(self.item())\n\n descr_vals = [self.__class__.__name__]\n unit = None if self.unit is None else str(self.unit)\n shape = None if self.ndim <= 1 else self.shape[1:]\n for attr, val in (('name', self.name),\n ('dtype', dtype_info_name(self.dtype)),\n ('shape', shape),\n ('unit', unit),\n ('format', self.format),\n ('description', self.description),\n ('length', len(self))):\n\n if val is not None:\n descr_vals.append('{0}={1!r}'.format(attr, val))\n\n descr = '<' + ' '.join(descr_vals) + '>\\n'\n\n if html:\n from ..utils.xml.writer import xml_escape\n descr = xml_escape(descr)\n\n data_lines, outs = self._formatter._pformat_col(\n self, show_name=False, show_unit=False, show_length=False, html=html)\n\n out = descr + '\\n'.join(data_lines)\n if six.PY2 and isinstance(out, six.text_type):\n out = out.encode('utf-8')\n\n return out\n\n def _repr_html_(self):\n return self._base_repr_(html=True)\n\n def __repr__(self):\n return self._base_repr_(html=False)\n\n def __unicode__(self):\n # If scalar then just convert to correct numpy type and use numpy repr\n if self.ndim == 0:\n return str(self.item())\n\n lines, outs = self._formatter._pformat_col(self)\n return '\\n'.join(lines)\n if not six.PY2:\n __str__ = __unicode__\n\n def __bytes__(self):\n return six.text_type(self).encode('utf-8')\n if six.PY2:\n __str__ = __bytes__\n\n def _check_string_truncate(self, value):\n \"\"\"\n Emit a warning if any elements of ``value`` will be truncated when\n ``value`` is assigned to self.\n \"\"\"\n # Convert input ``value`` to the string dtype of this column and\n # find the length of the longest string in the array.\n value = np.asanyarray(value, dtype=self.dtype.type)\n if value.size == 0:\n return\n value_str_len = np.char.str_len(value).max()\n\n # Parse the array-protocol typestring (e.g. 
'|U15') of self.dtype which\n # has the character repeat count on the right side.\n self_str_len = dtype_bytes_or_chars(self.dtype)\n\n if value_str_len > self_str_len:\n warnings.warn('truncated right side string(s) longer than {} '\n 'character(s) during assignment'\n .format(self_str_len),\n StringTruncateWarning,\n stacklevel=3)\n\n def __setitem__(self, index, value):\n if not six.PY2 and self.dtype.char == 'S':\n value = self._encode_str(value)\n\n # Issue warning for string assignment that truncates ``value``\n if issubclass(self.dtype.type, np.character):\n self._check_string_truncate(value)\n\n # update indices\n self.info.adjust_indices(index, value, len(self))\n\n # Set items using a view of the underlying data, as it gives an\n # order-of-magnitude speed-up. [#2994]\n self.data[index] = value\n\n if six.PY2:\n # avoid falling through to ndarray.__setslice__, instead using\n # self.__setitem__, which is much faster (see above). [#3020]\n def __setslice__(self, start, stop, value):\n self.__setitem__(slice(start, stop), value)\n\n def _make_compare(oper):\n \"\"\"\n Make comparison methods which encode the ``other`` object to utf-8\n in the case of a bytestring dtype for Py3+.\n \"\"\"\n swapped_oper = {'__eq__': '__eq__',\n '__ne__': '__ne__',\n '__gt__': '__lt__',\n '__lt__': '__gt__',\n '__ge__': '__le__',\n '__le__': '__ge__'}[oper]\n\n def _compare(self, other):\n op = oper # copy enclosed ref to allow swap below\n\n # Special case to work around #6838. Other combinations work OK,\n # see tests.test_column.test_unicode_sandwich_compare(). In this\n # case just swap self and other.\n #\n # This is related to an issue in numpy that was addressed in np 1.13.\n # However that fix does not make this problem go away, but maybe\n # future numpy versions will do so. NUMPY_LT_1_13 to get the\n # attention of future maintainers to check (by deleting or versioning\n # the if block below). See #6899 discussion.\n if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U' and\n isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):\n self, other = other, self\n op = swapped_oper\n\n if not six.PY2 and self.dtype.char == 'S':\n other = self._encode_str(other)\n return getattr(self.data, op)(other)\n\n return _compare\n\n __eq__ = _make_compare('__eq__')\n __ne__ = _make_compare('__ne__')\n __gt__ = _make_compare('__gt__')\n __lt__ = _make_compare('__lt__')\n __ge__ = _make_compare('__ge__')\n __le__ = _make_compare('__le__')\n\n def insert(self, obj, values, axis=0):\n \"\"\"\n Insert values before the given indices in the column and return\n a new `~astropy.table.Column` object.\n\n Parameters\n ----------\n obj : int, slice or sequence of ints\n Object that defines the index or indices before which ``values`` is\n inserted.\n values : array_like\n Value(s) to insert. If the type of ``values`` is different\n from that of quantity, ``values`` is converted to the matching type.\n ``values`` should be shaped so that it can be broadcast appropriately\n axis : int, optional\n Axis along which to insert ``values``. If ``axis`` is None then\n the column array is flattened before insertion. Default is 0,\n which will insert a row.\n\n Returns\n -------\n out : `~astropy.table.Column`\n A copy of column with ``values`` and ``mask`` inserted. Note that the\n insertion does not occur in-place: a new column is returned.\n \"\"\"\n if self.dtype.kind == 'O':\n # Even if values is array-like (e.g. [1,2,3]), insert as a single\n # object. 
Numpy.insert instead inserts each element in an array-like\n            # input individually.\n            data = np.insert(self, obj, None, axis=axis)\n            data[obj] = values\n        else:\n            # Explicitly convert to dtype of this column. Needed because numpy 1.7\n            # enforces safe casting by default, which isn't the case for 1.6 or 1.8+.\n            values = np.asarray(values, dtype=self.dtype)\n            data = np.insert(self, obj, values, axis=axis)\n        out = data.view(self.__class__)\n        out.__array_finalize__(self)\n        return out\n\n    # We do this to make the methods show up in the API docs\n    name = BaseColumn.name\n    unit = BaseColumn.unit\n    copy = BaseColumn.copy\n    more = BaseColumn.more\n    pprint = BaseColumn.pprint\n    pformat = BaseColumn.pformat\n    convert_unit_to = BaseColumn.convert_unit_to\n    quantity = BaseColumn.quantity\n    to = BaseColumn.to\n\n\nclass MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):\n    \"\"\"Define a masked data column for use in a Table object.\n\n    Parameters\n    ----------\n    data : list, ndarray or None\n        Column data values\n    name : str\n        Column name and key for reference within Table\n    mask : list, ndarray or None\n        Boolean mask for which True indicates missing or invalid data\n    fill_value : float, int, str or None\n        Value used when filling masked column elements\n    dtype : numpy.dtype compatible value\n        Data type for column\n    shape : tuple or ()\n        Dimensions of a single row element in the column data\n    length : int or 0\n        Number of row elements in column data\n    description : str or None\n        Full description of column\n    unit : str or None\n        Physical unit\n    format : str or None or function or callable\n        Format string for outputting column values. This can be an\n        \"old-style\" (``format % value``) or \"new-style\" (`str.format`)\n        format specification string or a function or any callable object that\n        accepts a single value and returns a string.\n    meta : dict-like or None\n        Meta-data associated with the column\n\n    Examples\n    --------\n    A MaskedColumn is similar to a Column except that it includes ``mask`` and\n    ``fill_value`` attributes. It can be created in two different ways:\n\n    - Provide a ``data`` value but not ``shape`` or ``length`` (which are\n      inferred from the data).\n\n      Examples::\n\n        col = MaskedColumn(data=[1, 2], name='name')\n        col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])\n        col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)\n\n      The ``mask`` argument will be cast as a boolean array and specifies\n      which elements are considered to be missing or invalid.\n\n      The ``dtype`` argument can be any value which is an acceptable\n      fixed-size data-type initializer for the numpy.dtype() method. See\n      `<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.\n      Examples include:\n\n      - Python non-string type (float, int, bool)\n      - Numpy non-string type (e.g. np.float32, np.int64, np.bool)\n      - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')\n\n      If no ``dtype`` value is provided then the type is inferred using\n      ``np.array(data)``. When ``data`` is provided then the ``shape``\n      and ``length`` arguments are ignored.\n\n    - Provide ``length`` and optionally ``shape``, but not ``data``\n\n      Examples::\n\n        col = MaskedColumn(name='name', length=5)\n        col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))\n\n    The default ``dtype`` is ``np.float64``. 
The ``shape`` argument is the\n array shape of a single cell in the column.\n \"\"\"\n\n def __new__(cls, data=None, name=None, mask=None, fill_value=None,\n dtype=None, shape=(), length=0,\n description=None, unit=None, format=None, meta=None,\n copy=False, copy_indices=True):\n\n if mask is None and hasattr(data, 'mask'):\n mask = data.mask\n else:\n mask = deepcopy(mask)\n\n # Create self using MaskedArray as a wrapper class, following the example of\n # class MSubArray in\n # https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py\n # This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and\n # https://github.com/astropy/astropy/commit/ff6039e8)\n\n # First just pass through all args and kwargs to BaseColumn, then wrap that object\n # with MaskedArray.\n self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,\n unit=unit, format=format, description=description,\n meta=meta, copy=copy, copy_indices=copy_indices)\n self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)\n\n # Note: do not set fill_value in the MaskedArray constructor because this does not\n # go through the fill_value workarounds.\n if fill_value is None and getattr(data, 'fill_value', None) is not None:\n # Coerce the fill_value to the correct type since `data` may be a\n # different dtype than self.\n fill_value = self.dtype.type(data.fill_value)\n self.fill_value = fill_value\n\n self.parent_table = None\n\n # needs to be done here since self doesn't come from BaseColumn.__new__\n for index in self.indices:\n index.replace_col(self_data, self)\n\n return self\n\n @property\n def fill_value(self):\n return self.get_fill_value() # defer to native ma.MaskedArray method\n\n @fill_value.setter\n def fill_value(self, val):\n \"\"\"Set fill value both in the masked column view and in the parent table\n if it exists. Setting one or the other alone doesn't work.\"\"\"\n\n # another ma bug workaround: If the value of fill_value for a string array is\n # requested but not yet set then it gets created as 'N/A'. From this point onward\n # any new fill_values are truncated to 3 characters. Note that this does not\n # occur if the masked array is a structured array (as in the previous block that\n # deals with the parent table).\n #\n # >>> x = ma.array(['xxxx'])\n # >>> x.fill_value # fill_value now gets represented as an 'S3' array\n # 'N/A'\n # >>> x.fill_value='yyyy'\n # >>> x.fill_value\n # 'yyy'\n #\n # To handle this we are forced to reset a private variable first:\n self._fill_value = None\n\n self.set_fill_value(val) # defer to native ma.MaskedArray method\n\n @property\n def data(self):\n out = self.view(ma.MaskedArray)\n # The following is necessary because of a bug in Numpy, which was\n # fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.\n out.fill_value = self.fill_value\n return out\n\n def filled(self, fill_value=None):\n \"\"\"Return a copy of self, with masked values filled with a given value.\n\n Parameters\n ----------\n fill_value : scalar; optional\n The value to use for invalid entries (`None` by default). 
If\n            `None`, the ``fill_value`` attribute of the array is used\n            instead.\n\n        Returns\n        -------\n        filled_column : Column\n            A copy of ``self`` with masked entries replaced by `fill_value`\n            (be it the function argument or the attribute of ``self``).\n        \"\"\"\n        if fill_value is None:\n            fill_value = self.fill_value\n\n        data = super(MaskedColumn, self).filled(fill_value)\n        # Use parent table definition of Column if available\n        column_cls = self.parent_table.Column if (self.parent_table is not None) else Column\n        out = column_cls(name=self.name, data=data, unit=self.unit,\n                         format=self.format, description=self.description,\n                         meta=deepcopy(self.meta))\n        return out\n\n    def insert(self, obj, values, mask=None, axis=0):\n        \"\"\"\n        Insert values along the given axis before the given indices and return\n        a new `~astropy.table.MaskedColumn` object.\n\n        Parameters\n        ----------\n        obj : int, slice or sequence of ints\n            Object that defines the index or indices before which ``values`` is\n            inserted.\n        values : array_like\n            Value(s) to insert. If the type of ``values`` is different\n            from that of quantity, ``values`` is converted to the matching type.\n            ``values`` should be shaped so that it can be broadcast appropriately\n        mask : boolean array_like\n            Mask value(s) to insert. If not supplied then False is used.\n        axis : int, optional\n            Axis along which to insert ``values``. If ``axis`` is None then\n            the column array is flattened before insertion. Default is 0,\n            which will insert a row.\n\n        Returns\n        -------\n        out : `~astropy.table.MaskedColumn`\n            A copy of column with ``values`` and ``mask`` inserted. Note that the\n            insertion does not occur in-place: a new masked column is returned.\n        \"\"\"\n        self_ma = self.data # self viewed as MaskedArray\n\n        if self.dtype.kind == 'O':\n            # Even if values is array-like (e.g. [1,2,3]), insert as a single\n            # object. Numpy.insert instead inserts each element in an array-like\n            # input individually.\n            new_data = np.insert(self_ma.data, obj, None, axis=axis)\n            new_data[obj] = values\n        else:\n            # Explicitly convert to dtype of this column. Needed because numpy 1.7\n            # enforces safe casting by default, which isn't the case for 1.6 or 1.8+.\n            values = np.asarray(values, dtype=self.dtype)\n            new_data = np.insert(self_ma.data, obj, values, axis=axis)\n\n        if mask is None:\n            if self.dtype.kind == 'O':\n                mask = False\n            else:\n                mask = np.zeros(values.shape, dtype=np.bool)\n        new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)\n        new_ma = np.ma.array(new_data, mask=new_mask, copy=False)\n\n        out = new_ma.view(self.__class__)\n        out.parent_table = None\n        out.indices = []\n        out._copy_attrs(self)\n\n        return out\n\n    def _copy_attrs_slice(self, out):\n        # Fixes issue #3023: when calling getitem with a MaskedArray subclass\n        # the original object attributes are not copied.\n        if out.__class__ is self.__class__:\n            out.parent_table = None\n            # we need this because __getitem__ does a shallow copy of indices\n            if out.indices is self.indices:\n                out.indices = []\n            out._copy_attrs(self)\n        return out\n\n    def __setitem__(self, index, value):\n        # Issue warning for string assignment that truncates ``value``\n        if not six.PY2 and self.dtype.char == 'S':\n            value = self._encode_str(value)\n\n        if issubclass(self.dtype.type, np.character):\n            # Account for a bug in np.ma.MaskedArray setitem.\n            # https://github.com/numpy/numpy/issues/8624\n            value = np.ma.asanyarray(value, dtype=self.dtype.type)\n\n            # Check for string truncation after filling masked items with\n            # empty (zero-length) string. 
Note that filled() does not make\n # a copy if there are no masked items.\n self._check_string_truncate(value.filled(''))\n\n # update indices\n self.info.adjust_indices(index, value, len(self))\n\n # Remove this when Numpy no longer emits this warning and that\n # Numpy version becomes the minimum required version for Astropy.\n # https://github.com/astropy/astropy/issues/6285\n if MaskedArrayFutureWarning is None:\n ma.MaskedArray.__setitem__(self, index, value)\n else:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', MaskedArrayFutureWarning)\n ma.MaskedArray.__setitem__(self, index, value)\n\n # We do this to make the methods show up in the API docs\n name = BaseColumn.name\n copy = BaseColumn.copy\n more = BaseColumn.more\n pprint = BaseColumn.pprint\n pformat = BaseColumn.pformat\n convert_unit_to = BaseColumn.convert_unit_to\n"
] | [
[
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.ma.MaskedArray.__setitem__",
"numpy.ma.asanyarray",
"numpy.any",
"numpy.asanyarray",
"numpy.ma.array",
"numpy.char.str_len",
"numpy.ma.MaskedArray.__new__",
"numpy.char.encode",
"numpy.insert",
"numpy.dtype"
]
] |
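For orientation, a short tour of the behaviour this module implements, driven through the public astropy.table entry points. A sketch assuming an installed astropy; exact printed formatting varies by version.

    from astropy.table import Column, MaskedColumn

    col = Column(data=[1.0, 2.0, 4.0], name='dist', unit='m', format='%.1f')
    print(col.pformat(show_unit=True))   # header rows plus per-value formatted strings
    print(col.to('km'))                  # unit conversion, returned as a Quantity

    mcol = MaskedColumn(data=[1, 2, 3], name='x', mask=[False, True, False], fill_value=-1)
    print(mcol.filled())                 # masked entry replaced by the fill value
    print(mcol.insert(1, 99))            # returns a new column; insertion is not in-place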
davidie/Text-Pairs-Relation-Classification | [
"fc4d7a1db2cdb32e048d8a56625d8686e5c25c36"
] | [
"ANN/test_ann.py"
] | [
"# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom utils import data_helpers as dh\n\n# Parameters\n# ==================================================\n\nlogger = dh.logger_fn('tflog', 'logs/test-{0}.log'.format(time.asctime()))\n\nMODEL = input(\"☛ Please input the model file you want to test, it should be like(1490175368): \")\n\nwhile not (MODEL.isdigit() and len(MODEL) == 10):\n MODEL = input('✘ The format of your input is illegal, it should be like(1490175368), please re-input: ')\nlogger.info('✔︎ The format of your input is legal, now loading to next step...')\n\nTRAININGSET_DIR = '../data/Train.json'\nVALIDATIONSET_DIR = '../data/Validation.json'\nTESTSET_DIR = '../data/Test.json'\nMODEL_DIR = 'runs/' + MODEL + '/checkpoints/'\nSAVE_DIR = 'results/' + MODEL\n\n# Data Parameters\ntf.flags.DEFINE_string(\"training_data_file\", TRAININGSET_DIR, \"Data source for the training data.\")\ntf.flags.DEFINE_string(\"validation_data_file\", VALIDATIONSET_DIR, \"Data source for the validation data\")\ntf.flags.DEFINE_string(\"test_data_file\", TESTSET_DIR, \"Data source for the test data\")\ntf.flags.DEFINE_string(\"checkpoint_dir\", MODEL_DIR, \"Checkpoint directory from training run\")\n\n# Model Hyperparameters\ntf.flags.DEFINE_integer(\"pad_seq_len\", 120, \"Recommended padding Sequence length of data (depends on the data)\")\ntf.flags.DEFINE_integer(\"embedding_dim\", 300, \"Dimensionality of character embedding (default: 128)\")\ntf.flags.DEFINE_integer(\"embedding_type\", 1, \"The embedding type (default: 1)\")\ntf.flags.DEFINE_integer(\"fc_hidden_size\", 1024, \"Hidden size for fully connected layer (default: 1024)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda (default: 0.0)\")\n\n# Test Parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\ntf.flags.DEFINE_boolean(\"gpu_options_allow_growth\", True, \"Allow gpu options growth\")\n\nFLAGS = tf.flags.FLAGS\nFLAGS(sys.argv)\ndilim = '-' * 100\nlogger.info('\\n'.join([dilim, *['{0:>50}|{1:<50}'.format(attr.upper(), FLAGS.__getattr__(attr))\n for attr in sorted(FLAGS.__dict__['__wrapped'])], dilim]))\n\n\ndef test_ann():\n \"\"\"Test ANN model.\"\"\"\n\n # Load data\n logger.info(\"✔ Loading data...\")\n logger.info('Recommended padding Sequence length is: {0}'.format(FLAGS.pad_seq_len))\n\n logger.info('✔︎ Test data processing...')\n test_data = dh.load_data_and_labels(FLAGS.test_data_file, FLAGS.embedding_dim)\n\n logger.info('✔︎ Test data padding...')\n x_test_front, x_test_behind, y_test = dh.pad_data(test_data, FLAGS.pad_seq_len)\n y_test_labels = test_data.labels\n\n # Load ann model\n logger.info(\"✔ Loading model...\")\n checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n logger.info(checkpoint_file)\n\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore 
variables\n saver = tf.train.import_meta_graph(\"{0}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x_front = graph.get_operation_by_name(\"input_x_front\").outputs[0]\n input_x_behind = graph.get_operation_by_name(\"input_x_behind\").outputs[0]\n input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n is_training = graph.get_operation_by_name(\"is_training\").outputs[0]\n\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n topKPreds = graph.get_operation_by_name(\"output/topKPreds\").outputs[0]\n accuracy = graph.get_operation_by_name(\"accuracy/accuracy\").outputs[0]\n loss = graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n # Split the output nodes name by '|' if you have several output nodes\n output_node_names = 'output/logits|output/predictions|output/softmax_scores|output/topKPreds'\n\n # Save the .pb model file\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,\n output_node_names.split(\"|\"))\n tf.train.write_graph(output_graph_def, 'graph', 'graph-ann-{0}.pb'.format(MODEL), as_text=False)\n\n # Generate batches for one epoch\n batches = dh.batch_iter(list(zip(x_test_front, x_test_behind, y_test, y_test_labels)),\n FLAGS.batch_size, 1, shuffle=False)\n\n # Collect the predictions here\n all_labels = []\n all_predicted_labels = []\n all_predicted_values = []\n\n for index, x_test_batch in enumerate(batches):\n x_batch_front, x_batch_behind, y_batch, y_batch_labels = zip(*x_test_batch)\n feed_dict = {\n input_x_front: x_batch_front,\n input_x_behind: x_batch_behind,\n input_y: y_batch,\n dropout_keep_prob: 1.0,\n is_training: False\n }\n\n all_labels = np.append(all_labels, y_batch_labels)\n\n batch_predicted_labels = sess.run(predictions, feed_dict)\n all_predicted_labels = np.concatenate([all_predicted_labels, batch_predicted_labels])\n\n batch_predicted_values = sess.run(topKPreds, feed_dict)\n all_predicted_values = np.append(all_predicted_values, batch_predicted_values)\n\n batch_loss = sess.run(loss, feed_dict)\n batch_acc = sess.run(accuracy, feed_dict)\n logger.info(\"✔︎ Test batch {0}: loss {1:g}, accuracy {2:g}.\".format((index + 1), batch_loss, batch_acc))\n\n # Save the prediction result\n if not os.path.exists(SAVE_DIR):\n os.makedirs(SAVE_DIR)\n dh.create_prediction_file(output_file=SAVE_DIR + '/predictions.json', front_data_id=test_data.front_testid,\n behind_data_id=test_data.behind_testid, all_labels=all_labels,\n all_predict_labels=all_predicted_labels, all_predict_values=all_predicted_values)\n\n logger.info(\"✔ Done.\")\n\n\nif __name__ == '__main__':\n test_ann()\n"
] | [
[
"numpy.concatenate",
"tensorflow.flags.DEFINE_string",
"tensorflow.train.latest_checkpoint",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.flags.DEFINE_float",
"tensorflow.ConfigProto",
"tensorflow.flags.DEFINE_integer",
"numpy.append"
]
] |
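The row above restores a TF 1.x checkpoint by tensor name and freezes it with `convert_variables_to_constants`. A minimal sketch of that restore-and-freeze pattern, assuming a TF 1.x runtime; the checkpoint directory and output node name here are hypothetical:

```python
# Restore a TF 1.x meta graph and freeze it to a single .pb file.
# Assumes TF 1.x; 'runs/.../checkpoints' and 'output/predictions' are made up.
import tensorflow as tf

checkpoint_file = tf.train.latest_checkpoint('runs/1490175368/checkpoints/')
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    with sess.as_default():
        # Rebuild the graph structure from the .meta file, then load weights.
        saver = tf.train.import_meta_graph('{0}.meta'.format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        # Bake variable values into constants so the graph ships as one file.
        frozen = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ['output/predictions'])
        tf.train.write_graph(frozen, 'graph', 'frozen.pb', as_text=False)
```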
bramtoula/singleshotpose | [
"8979087bac76aab7d2e739e3cae726db40a22037"
] | [
"valid.py"
] | [
"import os\nimport time\nimport torch\nimport argparse\nimport scipy.io\nimport warnings\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\n\nimport dataset\nfrom darknet import Darknet\nfrom utils import *\nfrom MeshPly import MeshPly\n\nfrom raptor_specific_utils import *\nimport pdb\n\ndef valid(datacfg, modelcfg, weightfile):\n def truths_length(truths, max_num_gt=50):\n for i in range(max_num_gt):\n if truths[i][1] == 0:\n return i\n\n # Parse configuration files\n data_options = read_data_cfg(datacfg)\n valid_images = data_options['valid']\n if 'mesh' in data_options:\n meshname = data_options['mesh']\n else:\n meshname = None\n assert('box_length' in data_options)\n box_length = float(data_options['box_length'])\n box_width = float(data_options['box_width'])\n box_height = float(data_options['box_height'])\n backupdir = data_options['backup']\n name = data_options['name']\n gpus = data_options['gpus'] \n fx = float(data_options['fx'])\n fy = float(data_options['fy'])\n u0 = float(data_options['u0'])\n v0 = float(data_options['v0'])\n im_width = int(data_options['width'])\n im_height = int(data_options['height'])\n if not os.path.exists(backupdir):\n makedirs(backupdir)\n\n # Parameters\n seed = int(time.time())\n os.environ['CUDA_VISIBLE_DEVICES'] = gpus\n torch.cuda.manual_seed(seed)\n save = False\n testtime = True\n num_classes = 1\n testing_samples = 0.0\n if save:\n makedirs(backupdir + '/test')\n makedirs(backupdir + '/test/gt')\n makedirs(backupdir + '/test/pr')\n # To save\n testing_error_trans = 0.0\n testing_error_angle = 0.0\n testing_error_pixel = 0.0\n errs_2d = []\n errs_3d = []\n errs_trans = []\n errs_angle = []\n errs_corner2D = []\n preds_trans = []\n preds_rot = []\n preds_corners2D = []\n gts_trans = []\n gts_rot = []\n gts_corners2D = []\n\n # Read object model information, get 3D bounding box corners\n if meshname is None:\n # vertices must be 4 x N for compute_projections to work later\n vertices = np.array([[ box_length/2, box_width/2, box_height/2, 1.],\n [ box_length/2, box_width/2,-box_height/2, 1.],\n [ box_length/2,-box_width/2,-box_height/2, 1.],\n [ box_length/2,-box_width/2, box_height/2, 1.],\n [-box_length/2,-box_width/2, box_height/2, 1.],\n [-box_length/2,-box_width/2,-box_height/2, 1.],\n [-box_length/2, box_width/2,-box_height/2, 1.],\n [-box_length/2, box_width/2, box_height/2, 1.]]).T\n diam = float(data_options['diam'])\n else:\n mesh = MeshPly(meshname)\n vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()\n try:\n diam = float(data_options['diam'])\n except:\n diam = calc_pts_diameter(np.array(mesh.vertices))\n \n corners3D = get_3D_corners(vertices)\n intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)\n\n # Get validation file names\n with open(valid_images) as fp:\n tmp_files = fp.readlines()\n valid_files = [item.rstrip() for item in tmp_files]\n \n # Specicy model, load pretrained weights, pass to GPU and set the module in evaluation mode\n model = Darknet(modelcfg)\n model.print_network()\n model.load_weights(weightfile)\n model.cuda()\n model.eval()\n test_width = model.test_width\n test_height = model.test_height\n num_keypoints = model.num_keypoints \n num_labels = num_keypoints * 2 + 3 # +2 for width, height, +1 for class label\n\n # Get the parser for the test dataset\n fx = float(data_options['fx'])\n fy = float(data_options['fy'])\n u0 = float(data_options['u0'])\n v0 = float(data_options['v0'])\n im_width = int(data_options['width'])\n im_height = 
int(data_options['height'])\n\n K = get_camera_intrinsic(u0, v0, fx, fy)\n dist_coefs = None\n tf_cam_ego = None\n cam_params = (K, dist_coefs, im_width, im_height, tf_cam_ego)\n valid_dataset = dataset.listDataset(valid_images, \n shape=(test_width, test_height),\n shuffle=False,\n transform=transforms.Compose([transforms.ToTensor(),]),\n cam_params=cam_params,\n corners3D=corners3D)\n\n # Specify the number of workers for multiple processing, get the dataloader for the test dataset\n kwargs = {'num_workers': 4, 'pin_memory': True}\n test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs) \n\n logging(\" Testing {}...\".format(name))\n logging(\" Number of test samples: %d\" % len(test_loader.dataset))\n # Iterate through test batches (Batch size for test data is 1)\n count = 0\n for batch_idx, (data, target) in enumerate(test_loader):\n t1 = time.time()\n # Pass data to GPU\n pdb.set_trace()\n\n data = data.cuda()\n target = target.cuda()\n # Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference\n data = Variable(data, volatile=True)\n t2 = time.time()\n # Forward pass\n output = model(data).data \n t3 = time.time()\n # Using confidence threshold, eliminate low-confidence predictions\n all_boxes = get_region_boxes(output, num_classes, num_keypoints) \n t4 = time.time()\n # Evaluation\n # Iterate through all batch elements\n for box_pr, target in zip([all_boxes], [target[0]]):\n # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)\n truths = target.view(-1, num_labels)\n # Get how many objects are present in the scene\n num_gts = truths_length(truths)\n # Iterate through each ground-truth object\n for k in range(num_gts):\n box_gt = list()\n for j in range(1, 2*num_keypoints+1):\n box_gt.append(truths[k][j])\n box_gt.extend([1.0, 1.0])\n box_gt.append(truths[k][0])\n\n # Denormalize the corner predictions \n corners2D_gt = np.array(np.reshape(box_gt[:18], [-1, 2]), dtype='float32')\n corners2D_pr = np.array(np.reshape(box_pr[:18], [-1, 2]), dtype='float32')\n corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width\n corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height \n corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width\n corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height\n preds_corners2D.append(corners2D_pr)\n gts_corners2D.append(corners2D_gt)\n\n # Compute corner prediction error\n corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)\n corner_dist = np.mean(corner_norm)\n errs_corner2D.append(corner_dist)\n\n # [OPTIONAL] generate images with bb drawn on them\n draw_2d_proj_of_3D_bounding_box(data, corners2D_pr, corners2D_gt, None, batch_idx, k, im_save_dir = \"./backup/{}/valid_output_images/\".format(name))\n \n # Compute [R|t] by pnp\n R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_gt, np.array(intrinsic_calibration, dtype='float32'))\n R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(intrinsic_calibration, dtype='float32'))\n \n # Compute translation error\n trans_dist = np.sqrt(np.sum(np.square(t_gt - t_pr)))\n errs_trans.append(trans_dist)\n \n # Compute angle error\n angle_dist = calcAngularDistance(R_gt, R_pr)\n errs_angle.append(angle_dist)\n \n # Compute pixel error\n Rt_gt = np.concatenate((R_gt, t_gt), axis=1)\n Rt_pr = 
np.concatenate((R_pr, t_pr), axis=1)\n proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)\n proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration) \n norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)\n pixel_dist = np.mean(norm)\n errs_2d.append(pixel_dist)\n\n # Compute 3D distances\n transform_3d_gt = compute_transformation(vertices, Rt_gt) \n transform_3d_pred = compute_transformation(vertices, Rt_pr) \n norm3d = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)\n vertex_dist = np.mean(norm3d) \n errs_3d.append(vertex_dist) \n \n # pdb.set_trace()\n\n # Sum errors\n testing_error_trans += trans_dist\n testing_error_angle += angle_dist\n testing_error_pixel += pixel_dist\n testing_samples += 1\n count = count + 1\n\n if save:\n preds_trans.append(t_pr)\n gts_trans.append(t_gt)\n preds_rot.append(R_pr)\n gts_rot.append(R_gt)\n\n np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_gt, dtype='float32'))\n np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_gt, dtype='float32'))\n np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_pr, dtype='float32'))\n np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_pr, dtype='float32'))\n np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_gt, dtype='float32'))\n np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_pr, dtype='float32'))\n\n\n t5 = time.time()\n\n # Compute 2D projection error, 6D pose error, 5cm5degree error\n px_threshold = 5 # 5 pixel threshold for 2D reprojection error is standard in recent sota 6D object pose estimation works \n eps = 1e-5\n acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d)+eps)\n acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)\n acc3d10 = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d)+eps)\n acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)\n corner_acc = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. 
/ (len(errs_corner2D)+eps)\n mean_err_2d = np.mean(errs_2d)\n mean_corner_err_2d = np.mean(errs_corner2D)\n nts = float(testing_samples)\n\n if testtime:\n print('-----------------------------------')\n print(' tensor to cuda : %f' % (t2 - t1))\n print(' forward pass : %f' % (t3 - t2))\n print('get_region_boxes : %f' % (t4 - t3))\n print(' prediction time : %f' % (t4 - t1))\n print(' eval : %f' % (t5 - t4))\n print('-----------------------------------')\n\n # Print test statistics\n logging('Results of {}'.format(name))\n logging(' Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))\n logging(' Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))\n logging(' Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))\n logging(\" Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f\" % (mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))\n logging(' Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (testing_error_trans/nts, testing_error_angle/nts, testing_error_pixel/nts) )\n\n if save:\n predfile = backupdir + '/predictions_linemod_' + name + '.mat'\n scipy.io.savemat(predfile, {'R_gts': gts_rot, 't_gts':gts_trans, 'corner_gts': gts_corners2D, 'R_prs': preds_rot, 't_prs':preds_trans, 'corner_prs': preds_corners2D})\n\nif __name__ == '__main__':\n\n # Parse configuration files\n parser = argparse.ArgumentParser(description='SingleShotPose')\n parser.add_argument('--datacfg', type=str, default='cfg/ape.data') # data config\n parser.add_argument('--modelcfg', type=str, default='cfg/yolo-pose.cfg') # network config\n parser.add_argument('--weightfile', type=str, default='backup/ape/model_backup.weights') # imagenet initialized weights\n args = parser.parse_args()\n datacfg = args.datacfg\n modelcfg = args.modelcfg\n weightfile = args.weightfile\n valid(datacfg, modelcfg, weightfile)\n"
] | [
[
"torch.autograd.Variable",
"torch.cuda.manual_seed",
"torch.utils.data.DataLoader"
]
] |
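valid.py above reports three pose metrics. A self-contained numpy sketch of how those thresholds turn per-sample error lists into accuracies (the error values are made up):

```python
# 2D-projection accuracy at 5 px and the 5cm/5deg metric, on fake error lists.
import numpy as np

errs_2d = np.array([1.2, 3.4, 7.9, 4.1])         # reprojection error (px)
errs_trans = np.array([0.01, 0.09, 0.03, 0.04])  # translation error (m)
errs_angle = np.array([2.0, 7.5, 4.9, 1.1])      # rotation error (deg)

eps = 1e-5  # guards against division by zero on empty lists
px_threshold = 5
acc = len(np.where(errs_2d <= px_threshold)[0]) * 100. / (len(errs_2d) + eps)
# 5cm/5deg: translation AND rotation must both be within tolerance.
acc5cm5deg = len(np.where((errs_trans <= 0.05) & (errs_angle <= 5))[0]) * 100. / (len(errs_trans) + eps)
print(acc, acc5cm5deg)  # both ~75.0 for these fake errors
```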
SunnyChing/duckietown5909 | [
"b3c1c0088fb2802c0198b52846a8454f2ec0e79b"
] | [
"catkin_ws/src/f23-LED/led_detection/src/LED_detector_node_sunny.py"
] | [
"#!/usr/bin/env python\nimport rospy\nimport time\nfrom led_detection.LEDDetector import sunny_LEDDetector\nfrom std_msgs.msg import Byte\nfrom duckietown_msgs.msg import Vector2D, LEDDetection, LEDDetectionArray, LEDDetectionDebugInfo, BoolStamped\nfrom sensor_msgs.msg import CompressedImage\nfrom duckietown_utils.bag_logs import numpy_from_ros_compressed\nimport numpy as np\n\nclass LEDDetectorNode(object):\n def __init__(self):\n #Setup HSV threshold\n hsv_white1 = np.array([0,0,150])\n hsv_white2 = np.array([180,100,255])\n hsv_purple1 = np.array([25,50,50])\n hsv_purple2 = np.array([45,255,255])\n hsv_green1 = np.array([0,100,100])\n hsv_green2 = np.array([15,255,255])\n hsv_red3 = np.array([165,100,100])\n hsv_red4 = np.array([180,255,255])\n\n self.active = True # [INTERACTIVE MODE] Won't be overwritten if FSM isn't running, node always active \n self.first_timestamp = 0\n self.capture_time = 0.97 # capture time\n self.capture_finished = True\n self.tinit = None\n self.trigger = False\n self.node_state = 0\n self.data = []\n \n self.node_name = rospy.get_name()\n self.pub_detections = rospy.Publisher(\"~raw_led_detection\",LEDDetectionArray,queue_size=1)\n self.pub_debug = rospy.Publisher(\"~debug_info\",LEDDetectionDebugInfo,queue_size=1)\n self.veh_name = rospy.get_namespace().strip(\"/\")\n\n self.protocol = rospy.get_param(\"~LED_protocol\")\n self.crop_rect_normalized = rospy.get_param(\"~crop_rect_normalized\")\n self.capture_time = rospy.get_param(\"~capture_time\")\n self.cell_size = rospy.get_param(\"~cell_size\")\n self.continuous = rospy.get_param('~continuous', True) # Detect continuously as long as active\n # [INTERACTIVE MODE] set to False for manual trigger\n self.frequencies = self.protocol['frequencies'].values()\n\n rospy.loginfo('[%s] Config: \\n\\t crop_rect_normalized: %s, \\n\\t capture_time: %s, \\n\\t cell_size: %s'%(self.node_name, self.crop_rect_normalized, self.capture_time, self.cell_size))\n\n if not self.veh_name:\n # fall back on private param passed thru rosrun\n # syntax is: rosrun <pkg> <node> _veh:=<bot-id>\n if rospy.has_param('~veh'):\n self.veh_name = rospy.get_param('~veh')\n \n if not self.veh_name:\n raise ValueError('Vehicle name is not set.')\n\n rospy.loginfo('[%s] Vehicle: %s'%(self.node_name, self.veh_name))\n self.sub_cam = rospy.Subscriber(\"camera_node/image/compressed\",CompressedImage, self.camera_callback)\n self.sub_trig = rospy.Subscriber(\"~trigger\",Byte, self.trigger_callback)\n self.sub_switch = rospy.Subscriber(\"~switch\",BoolStamped,self.cbSwitch)\n rospy.loginfo('[%s] Waiting for camera image...' %self.node_name)\n\n def cbSwitch(self, switch_msg): # active/inactive switch from FSM\n self.active = switch_msg.data\n if(self.active):\n self.trigger = True\n\n def camera_callback(self, msg):\n if not self.active:\n return\n\n float_time = msg.header.stamp.to_sec()\n debug_msg = LEDDetectionDebugInfo()\n\n if self.trigger:\n rospy.loginfo('[%s] GOT TRIGGER! Starting...')\n self.trigger = False\n self.data = []\n self.capture_finished = False\n rospy.loginfo('[%s] Start capturing frames'%self.node_name)\n self.first_timestamp = msg.header.stamp.to_sec()\n self.tinit = time.time()\n\n elif self.capture_finished:\n self.node_state = 0\n rospy.loginfo('[%s] Waiting for trigger...' 
%self.node_name)\n\n        if self.first_timestamp > 0:\n            # TODO sanity check rel_time positive, restart otherwise \n            rel_time = float_time - self.first_timestamp\n\n            # Capturing\n            if rel_time < self.capture_time:\n                self.node_state = 1\n                rgb = numpy_from_ros_compressed(msg)\n                rospy.loginfo('[%s] Capturing frame %s' %(self.node_name, rel_time))\n                self.data.append({'timestamp': float_time, 'rgb': rgb[:,:,:]})\n                debug_msg.capture_progress = 100.0*rel_time/self.capture_time\n\n            # Start processing\n            elif not self.capture_finished and self.first_timestamp > 0:\n                rospy.loginfo('[%s] Relative Time %s, processing' %(self.node_name, rel_time))\n                self.node_state = 2\n                self.capture_finished = True\n                self.first_timestamp = 0\n                self.sub_cam.unregister() # IMPORTANT! Explicitly ignore messages \n                                          # while processing, accumulates delay otherwise!\n                self.send_state(debug_msg)\n                self.process_and_publish()\n\n        self.send_state(debug_msg) # TODO move heartbeat to dedicated thread\n\n    def trigger_callback(self, msg):\n        self.trigger = True\n\n    def process_and_publish(self):\n        # TODO add check timestamps for dropped frames\n        H, W, _ = self.data[0]['rgb'].shape\n        n = len(self.data)\n        dtype = [\n            ('timestamp', 'float'),\n            ('rgb', 'uint8', (H, W, 3)),\n        ]\n        images = np.zeros((n,), dtype=dtype)\n        for i, v in enumerate(self.data):\n            images[i]['timestamp'] = v['timestamp']\n            images[i]['rgb'][:] = v['rgb']\n        \n        det = sunny_LEDDetector(False, False, False, self.pub_debug)  # matches the class imported above\n        rgb0 = self.data[0]['rgb']\n        #mask = np.ones(dtype='bool', shape=rgb0.shape)\n        tic = time.time()\n        result = det.detect_led(images, self.frequencies, self.cell_size, self.crop_rect_normalized)\n        self.pub_detections.publish(result)\n\n        toc = time.time()-tic\n        tac = time.time()-self.tinit\n        rospy.loginfo('[%s] Detection done. Processing Time: %.2f'%(self.node_name, toc))\n        print('[%s] Total Time taken: %.2f'%(self.node_name, tac))\n\n        if(self.continuous):\n            self.trigger = True\n            self.sub_cam = rospy.Subscriber(\"camera_node/image/compressed\",CompressedImage, self.camera_callback)\n        \n    def send_state(self, msg):\n        msg.state = self.node_state\n        self.pub_debug.publish(msg)    \n\nif __name__ == '__main__':\n    rospy.init_node('LED_detector_node',anonymous=False)\n    node = LEDDetectorNode()\n    rospy.spin()\n\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
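`process_and_publish()` above packs the captured frames into a single structured numpy array. A tiny standalone sketch of that technique (shapes and frame contents are invented):

```python
# Pack timestamped RGB frames into one structured array, field-addressable.
import numpy as np

H, W, n = 4, 6, 3  # hypothetical frame size and count
frames = [{'timestamp': float(i), 'rgb': np.full((H, W, 3), i, dtype=np.uint8)}
          for i in range(n)]

dtype = [('timestamp', 'float'), ('rgb', 'uint8', (H, W, 3))]
images = np.zeros((n,), dtype=dtype)
for i, v in enumerate(frames):
    images[i]['timestamp'] = v['timestamp']
    images[i]['rgb'][:] = v['rgb']

print(images['timestamp'])  # [0. 1. 2.]
print(images['rgb'].shape)  # (3, 4, 6, 3)
```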
wopsed1004/CompDam_DGD | [
"3c860bbb7985a13fa5468a49007859946ade20b8"
] | [
"pyextmod/verify_debug.py"
] | [
"import numpy as np\nimport CompDam_DGD\nimport helpers as h\nimport os, sys, argparse, inspect, json, re, numpy, shutil\n\nsv_attributes_ignore = ('old', 'debugpy_count', 'direct')\n\ndef _main_entry(args):\n '''\n Loads debug file\n Runs compdam using sv_old\n Writes a json file with the state variables calculated by Abaqus and the Python Extension Module\n '''\n\n # debug file name\n debug_file_name = os.path.basename(args.debugfile)\n debug_file_name_no_ext = os.path.splitext(debug_file_name)[0]\n # location to path\n debug_file_abspath = os.path.abspath(args.debugfile)\n sys.path.append(os.path.dirname(debug_file_abspath))\n # job name\n jobName = re.sub(r'-[0-9]*-debug-[0-9]*', '', debug_file_name_no_ext)\n\n # For debugging\n with open(os.path.join(os.path.dirname(debug_file_abspath), jobName+'_python.log'), 'w') as log:\n log.write('Debug file name: ' + debug_file_name + '\\n')\n\n # logging\n pyextmod_log_file_name = jobName+'_fortran.log'\n CompDam_DGD.dgd_mod.log_init(level=4, filename=pyextmod_log_file_name, totaltime=0.081)\n\n # Load the debug file\n (m, p, sv, sv_old, F, F_old, U, debugpy) = h.loaddebugpy(filename=debug_file_name_no_ext)\n log.write('\\nState variables: \\n' + str(sv) + '\\n')\n print(sv)\n\n # Run CompDam\n sv_calculated = sv_old\n Cauchy = np.zeros((3,3), order='F')\n enerintern = 0\n enerinelas = 0\n fatigue_step = False\n func = getattr(CompDam_DGD.dgd_mod, args.subroutine)\n if args.subroutine == 'dgdkinkband':\n func(u=U, f=F, f_old=F_old, m=m, p=p, sv=sv_calculated, ndir=3, nshr=3, dt=0, density_abq=debugpy.density_abq, cauchy=Cauchy, enerintern=enerintern, enerinelas=enerinelas)\n else:\n func(u=U, f=F, f_old=F_old, m=m, p=p, sv=sv_calculated, ndir=3, nshr=3, dt=0, density_abq=debugpy.density_abq, cauchy=Cauchy, enerintern=enerintern, enerinelas=enerinelas, fatigue_step=fatigue_step)\n\n # Move the pyextmod log file to the testoutput directory\n CompDam_DGD.dgd_mod.log_close()\n os.rename(os.path.abspath(pyextmod_log_file_name), os.path.abspath(os.path.join(os.pardir, 'tests', 'testOutput', pyextmod_log_file_name)))\n\n ## GOAL is to compare state variables --> if state variables are computed correctly, assume the debug.py file logic works\n sv_comparison = {}\n attributes = _get_attributes(sv)\n for a in attributes:\n if a in sv_attributes_ignore:\n continue\n abq_sv = getattr(sv, a)\n pyextmod_sv = getattr(sv_calculated, a)\n # Convert numpy arrays to lists for serialization\n if isinstance(abq_sv, numpy.ndarray):\n abq_sv = abq_sv.tolist()\n if isinstance(pyextmod_sv, numpy.ndarray):\n pyextmod_sv = pyextmod_sv.tolist()\n sv_comparison[a] = (abq_sv, pyextmod_sv)\n\n # Write to file\n output_filename = jobName +'_pyextmod_results.json'\n with open(os.path.join(os.path.dirname(debug_file_abspath), output_filename), 'w') as outfile:\n json.dump(sv_comparison, outfile, indent=2)\n\n log.write('End of python extension module execution\\n')\n\n return\n\n\ndef _get_attributes(obj):\n attributes = []\n for i in inspect.getmembers(obj):\n if not i[0].startswith('_'):\n if not inspect.ismethod(i[1]):\n attributes.append(i[0])\n return attributes\n\n\nif __name__ == \"__main__\":\n\t# Arguments\n parser = argparse.ArgumentParser(description='Loads in state variables from a debug file to prove continuity between debug file and CompDam code.')\n parser.add_argument('subroutine', choices=['dgdinit', 'dgdevolve', 'dgdkinkband'], help='Specify which functionality from DGD to call.')\n parser.add_argument('debugfile', action='store', help='Path to debug file.')\n 
parser.set_defaults(func=_main_entry)\n \n # Parse the args\n args = parser.parse_args()\n args.func(args)\n"
] | [
[
"numpy.zeros"
]
] |
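verify_debug.py above diffs two state-variable objects attribute by attribute and writes the pairs to JSON. A stripped-down sketch of that compare-and-serialize step, using a stand-in class instead of the real CompDam records:

```python
# Walk public, non-method attributes of two objects and store (reference,
# computed) pairs; numpy arrays are converted to lists so json can dump them.
import inspect
import json
import numpy as np

class SV:  # stand-in for the loaded state-variable records
    def __init__(self, d6, Fb1):
        self.d6, self.Fb1 = d6, Fb1

def public_attributes(obj):
    return [name for name, val in inspect.getmembers(obj)
            if not name.startswith('_') and not inspect.ismethod(val)]

sv_abq, sv_py = SV(0.1, np.eye(3)), SV(0.1, np.eye(3))
comparison = {}
for a in public_attributes(sv_abq):
    left, right = getattr(sv_abq, a), getattr(sv_py, a)
    if isinstance(left, np.ndarray):
        left = left.tolist()
    if isinstance(right, np.ndarray):
        right = right.tolist()
    comparison[a] = (left, right)
print(json.dumps(comparison, indent=2))
```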
mkucz95/image_classifier | [
"dd42a3a5c7baa7bd09a70709c14bc183c8af14b1"
] | [
"submission/network.py"
] | [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass Network(nn.Module):\n def __init__(self, input_size, output_size, hidden_layers, drop_p):\n super().__init__()\n self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n\n #create hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:]) #gives input/output sizes for each layer\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n self.output = nn.Linear(hidden_layers[-1], output_size)\n self.dropout = nn.Dropout(p=drop_p)\n \n def forward(self, x):\n for each in self.hidden_layers:\n x = F.relu(each(x)) #apply relu to each hidden node\n x = self.dropout(x) #apply dropout\n x = self.output(x) #apply output weights\n return F.log_softmax(x, dim=1) #apply activation log softmax\n \ndef check_accuracy_loss(model, loader, criterion, gpu):\n if gpu:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else: device = \"cpu\"\n\n model.eval()\n accuracy = 0\n loss=0\n with torch.no_grad():\n for images, labels in loader:\n images, labels = images.to(device), labels.to(device)\n outputs = model(images) #see how our network classifies\n prob = torch.exp(outputs) #get the exponents back\n results = (labels.data == prob.max(1)[1]) #which labels == our predictions (highest probability gives prediction)\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy+=results.type_as(torch.FloatTensor()).mean() \n loss+=criterion(outputs,labels) \n return accuracy/len(loader), loss/len(loader) #since acc and loss are sums, we need to get the avg over all the input images\n \n #NETWORK TRAINING\ndef train_network(model, trainloader, validloader, epochs, print_every, criterion, optimizer, scheduler, gpu):\n if gpu:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else: device = \"cpu\"\n \n steps=0\n # change to cuda\n model.to(device)\n model.train() #training mode\n\n for e in range(epochs):\n scheduler.step() #we use scheduler\n running_loss=0\n for ii, (inputs,labels) in enumerate(trainloader):\n steps+=1\n inputs, labels = inputs.to(device), labels.to(device) #move data to gpu\n optimizer.zero_grad()#zero out gradients so that one forward pass doesnt pick up previous forward's gradients\n outputs = model.forward(inputs)\n loss = criterion(outputs,labels)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n #code below courtesy of udacity\n if steps % print_every == 0:\n accuracy,valid_loss = check_accuracy_loss(model,validloader,criterion,gpu)\n \n print(\"Epoch: {}/{}... \".format(e+1, epochs),\n \"Training Loss: {:.4f}\".format(running_loss/print_every),\n \"Validation Loss: {:.4f}\".format(valid_loss),\n \"Validation Accuracy: {:.4f}\".format(accuracy))\n running_loss = 0\n model.train()"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.no_grad",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.cuda.is_available",
"torch.exp"
]
] |
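A hypothetical usage of the `Network` class defined in network.py above (the import path, layer sizes, and batch shape are assumptions):

```python
import torch
from network import Network  # assumes network.py above is on the path

net = Network(input_size=784, output_size=10, hidden_layers=[256, 128], drop_p=0.5)
x = torch.randn(4, 784)            # a batch of 4 flattened 28x28 images
log_probs = net(x)                 # log-softmax output, shape (4, 10)
print(log_probs.exp().sum(dim=1))  # each row sums to ~1
```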
Benjamin-Etheredge/siamese | [
"9665d52bb1e8bf329821788332eb38476595a60f"
] | [
"siamese/data/label_utils.py"
] | [
"import tensorflow as tf\nimport os\n\ndef get_labels_from_filenames(files: tf.Tensor, separator='_', label_idx=0):\n if not tf.is_tensor(files):\n files = tf.convert_to_tensor(files)\n\n path_splits = tf.strings.split(files, sep=os.sep)\n filenames = tf.squeeze(path_splits[:, -1:].to_tensor()) # must use slicing for ragged tensor\n filename_splits = tf.strings.split(filenames, sep=separator)\n # TODO join together since there can be multiple here\n labels = tf.squeeze(filename_splits[:, :label_idx+1].to_tensor()) # must use slicing for ragged tensor\n return labels\n\n\ndef get_label_from_filename(file: tf.Tensor, separator='_', label_idx=0):\n if not tf.is_tensor(file):\n files = tf.convert_to_tensor(file)\n\n path_split = tf.strings.split(file, sep=os.sep)\n filename = path_split[-1]\n filename_split = tf.strings.split(filename, sep=separator)\n label = filename_split[label_idx]\n return label\n\n\ndef get_labels_from_files_path(files):\n '''Getter for files in a directory named the label of data'''\n if not tf.is_tensor(files):\n files = tf.convert_to_tensor(files)\n\n splits = tf.strings.split(files, sep=os.sep)\n labels = tf.squeeze(splits[:, -2:-1].to_tensor())\n return labels\n\n\ndef get_label_from_file_path(file):\n '''Getter for file in a directory named the label of data'''\n if not tf.is_tensor(file):\n files = tf.convert_to_tensor(file)\n all_split = tf.strings.split(file, sep=os.sep)\n labels = all_split[-2]\n return labels\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.is_tensor",
"tensorflow.strings.split"
]
] |
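The helpers above recover a label from a filename like `dog_001.jpg` by splitting on the path separator and then on `_`. A self-contained TF 2.x sketch of that ragged-slicing pattern (paths are made up; `'/'` stands in for `os.sep`):

```python
import tensorflow as tf

files = tf.convert_to_tensor(['data/train/dog_001.jpg', 'data/train/cat_042.jpg'])
path_splits = tf.strings.split(files, sep='/')           # RaggedTensor of path parts
filenames = tf.squeeze(path_splits[:, -1:].to_tensor())  # last path component
labels = tf.squeeze(tf.strings.split(filenames, sep='_')[:, :1].to_tensor())
print(labels.numpy())  # [b'dog' b'cat']
```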
Uason-Chen/SGP-JCA | [
"4ea9d4c7b049fe729ea98c86263ba208871beaf1"
] | [
"main.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport inspect\nimport os\nimport pickle\nimport random\nimport shutil\nimport sys\nimport time\nfrom collections import OrderedDict\nimport traceback\nfrom sklearn.metrics import confusion_matrix\nimport csv\nimport numpy as np\n\n# torch\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\n# third part\nimport torchlight\nfrom torchlight import DictAction\n\n\ndef init_seed(seed):\n torch.cuda.manual_seed_all(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n # torch.backends.cudnn.enabled = False\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\ndef get_parser():\n # parameter priority: command line > config > default\n parser = argparse.ArgumentParser(\n description='Spatial Temporal Graph Convolution Network')\n parser.add_argument(\n '--work-dir',\n default='./work_dir/temp',\n help='the work folder for storing results')\n\n parser.add_argument('-model_saved_name', default='')\n parser.add_argument(\n '--config',\n default='./config/nturgbd-cross-view/test_bone.yaml',\n help='path to the configuration file')\n\n # processor\n parser.add_argument(\n '--phase', default='train', help='must be train or test')\n parser.add_argument(\n '--save-score',\n type=str2bool,\n default=False,\n help='if ture, the classification score will be stored')\n\n # visulize and debug\n parser.add_argument(\n '--seed', type=int, default=1, help='random seed for pytorch')\n parser.add_argument(\n '--log-interval',\n type=int,\n default=100,\n help='the interval for printing messages (#iteration)')\n parser.add_argument(\n '--save-interval',\n type=int,\n default=1,\n help='the interval for storing models (#iteration)')\n parser.add_argument(\n '--save-epoch',\n type=int,\n default=30,\n help='the start epoch to save model (#iteration)')\n parser.add_argument(\n '--eval-interval',\n type=int,\n default=5,\n help='the interval for evaluating models (#iteration)')\n parser.add_argument(\n '--print-log',\n type=str2bool,\n default=True,\n help='print logging or not')\n parser.add_argument(\n '--show-topk',\n type=int,\n default=[1, 5],\n nargs='+',\n help='which Top K accuracy will be shown')\n\n # feeder\n parser.add_argument(\n '--feeder', default='feeder.feeder', help='data loader will be used')\n parser.add_argument(\n '--num-worker',\n type=int,\n default=32,\n help='the number of worker for data loader')\n parser.add_argument(\n '--train-feeder-args',\n action=DictAction,\n default=dict(),\n help='the arguments of data loader for training')\n parser.add_argument(\n '--test-feeder-args',\n action=DictAction,\n default=dict(),\n help='the arguments of data loader for test')\n\n # model\n parser.add_argument('--model', default=None, help='the model will be used')\n parser.add_argument(\n '--model-args',\n action=DictAction,\n default=dict(),\n help='the arguments of model')\n parser.add_argument(\n '--weights',\n default=None,\n help='the weights for network initialization')\n parser.add_argument(\n '--ignore-weights',\n type=str,\n default=[],\n nargs='+',\n help='the name of weights which will be ignored in 
the initialization')\n\n # optim\n parser.add_argument(\n '--base-lr', type=float, default=0.01, help='initial learning rate')\n parser.add_argument(\n '--step',\n type=int,\n default=[20, 40, 60],\n nargs='+',\n help='the epoch where optimizer reduce the learning rate')\n parser.add_argument(\n '--device',\n type=int,\n default=0,\n nargs='+',\n help='the indexes of GPUs for training or testing')\n parser.add_argument('--optimizer', default='SGD', help='type of optimizer')\n parser.add_argument(\n '--nesterov', type=str2bool, default=False, help='use nesterov or not')\n parser.add_argument(\n '--batch-size', type=int, default=256, help='training batch size')\n parser.add_argument(\n '--test-batch-size', type=int, default=256, help='test batch size')\n parser.add_argument(\n '--start-epoch',\n type=int,\n default=0,\n help='start training from which epoch')\n parser.add_argument(\n '--num-epoch',\n type=int,\n default=80,\n help='stop training in which epoch')\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=0.0005,\n help='weight decay for optimizer')\n return parser\n\n\nclass Processor():\n \"\"\" \n Processor for Skeleton-based Action Recgnition\n \"\"\"\n\n def __init__(self, arg):\n self.arg = arg\n self.save_arg()\n if arg.phase == 'train':\n arg.model_saved_name = os.path.join(arg.work_dir, 'runs')\n if os.path.isdir(arg.model_saved_name):\n print('log_dir: ', arg.model_saved_name, 'already exist')\n answer = input('delete it? y/n:')\n if answer == 'y':\n shutil.rmtree(arg.model_saved_name)\n print('Dir removed: ', arg.model_saved_name)\n input('Refresh the website of tensorboard by pressing any keys')\n else:\n print('Dir not removed: ', arg.model_saved_name)\n self.train_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'train'), 'train')\n self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'val'), 'val')\n\n self.global_step = 0\n self.load_model()\n self.load_optimizer()\n self.load_data()\n self.lr = self.arg.base_lr\n self.best_acc = 0\n self.best_acc_epoch = 0\n self.model = self.model.cuda(self.output_device)\n if type(self.arg.device) is list:\n if len(self.arg.device) > 1:\n self.model = nn.DataParallel(\n self.model,\n device_ids=self.arg.device,\n output_device=self.output_device)\n\n\n\n def load_data(self):\n Feeder = import_class(self.arg.feeder)\n self.data_loader = dict()\n if self.arg.phase == 'train':\n self.data_loader['train'] = torch.utils.data.DataLoader(\n dataset=Feeder(**self.arg.train_feeder_args),\n batch_size=self.arg.batch_size,\n shuffle=True,\n num_workers=self.arg.num_worker,\n drop_last=True,\n worker_init_fn=init_seed)\n self.data_loader['test'] = torch.utils.data.DataLoader(\n dataset=Feeder(**self.arg.test_feeder_args),\n batch_size=self.arg.test_batch_size,\n shuffle=False,\n num_workers=self.arg.num_worker,\n drop_last=False,\n worker_init_fn=init_seed)\n\n def load_model(self):\n # pdb.set_trace()\n self.arg.device = torchlight.visible_gpu(self.arg.device)\n output_device = self.arg.device[0] if type(self.arg.device) is list else self.arg.device\n self.output_device = output_device\n Model = import_class(self.arg.model)\n shutil.copy2(inspect.getfile(Model), self.arg.work_dir)\n print(Model)\n self.model = Model(**self.arg.model_args)\n # pdb.set_trace()\n print(self.model)\n self.loss = nn.CrossEntropyLoss().cuda(output_device)\n\n if self.arg.weights:\n self.global_step = int(arg.weights[:-3].split('-')[-1])\n self.print_log('Load weights from {}.'.format(self.arg.weights))\n if '.pkl' in 
self.arg.weights:\n with open(self.arg.weights, 'r') as f:\n weights = pickle.load(f)\n else:\n weights = torch.load(self.arg.weights)\n\n weights = OrderedDict(\n [[k.split('module.')[-1],\n v.cuda(output_device)] for k, v in weights.items()])\n\n keys = list(weights.keys())\n for w in self.arg.ignore_weights:\n for key in keys:\n if w in key:\n if weights.pop(key, None) is not None:\n self.print_log('Sucessfully Remove Weights: {}.'.format(key))\n else:\n self.print_log('Can Not Remove Weights: {}.'.format(key))\n\n try:\n self.model.load_state_dict(weights)\n except:\n state = self.model.state_dict()\n diff = list(set(state.keys()).difference(set(weights.keys())))\n print('Can not find these weights:')\n for d in diff:\n print(' ' + d)\n state.update(weights)\n self.model.load_state_dict(state)\n\n def load_optimizer(self):\n if self.arg.optimizer == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.arg.base_lr,\n momentum=0.9,\n nesterov=self.arg.nesterov,\n weight_decay=self.arg.weight_decay)\n elif self.arg.optimizer == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.arg.base_lr,\n weight_decay=self.arg.weight_decay)\n else:\n raise ValueError()\n\n def save_arg(self):\n # save arg\n arg_dict = vars(self.arg)\n if not os.path.exists(self.arg.work_dir):\n os.makedirs(self.arg.work_dir)\n with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:\n f.write(f\"# command line: {' '.join(sys.argv)}\\n\\n\")\n yaml.dump(arg_dict, f)\n\n def adjust_learning_rate(self, epoch):\n if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':\n if epoch < self.arg.warm_up_epoch:\n lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch\n else:\n lr = self.arg.base_lr * (\n 0.1 ** np.sum(epoch >= np.array(self.arg.step)))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n else:\n raise ValueError()\n\n def print_time(self):\n localtime = time.asctime(time.localtime(time.time()))\n self.print_log(\"Local current time : \" + localtime)\n\n def print_log(self, str, print_time=True):\n if print_time:\n localtime = time.asctime(time.localtime(time.time()))\n str = \"[ \" + localtime + ' ] ' + str\n print(str)\n if self.arg.print_log:\n with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:\n print(str, file=f)\n\n def record_time(self):\n self.cur_time = time.time()\n return self.cur_time\n\n def split_time(self):\n split_time = time.time() - self.cur_time\n self.record_time()\n return split_time\n\n def train(self, epoch, save_model=False):\n self.model.train()\n self.print_log('Training epoch: {}'.format(epoch + 1))\n loader = self.data_loader['train']\n self.adjust_learning_rate(epoch)\n loss_value = []\n acc_value = []\n self.train_writer.add_scalar('epoch', epoch, self.global_step)\n self.record_time()\n timer = dict(dataloader=0.001, model=0.001, statistics=0.001)\n process = tqdm(loader, ncols=80)\n for batch_idx, (data, label, index) in enumerate(process):\n self.global_step += 1\n # get data\n if self.arg.downsample_T:\n data = data[:, :, list(range(0, data.shape[2], 2)), :]\n with torch.no_grad():\n data = data.float().cuda(self.output_device)\n label = label.long().cuda(self.output_device)\n timer['dataloader'] += self.split_time()\n\n # forward\n output = self.model(data)\n if isinstance(output, tuple):\n output, l1 = output\n l1 = l1.mean()\n else:\n l1 = 0\n loss = self.loss(output, label) + (l1 if epoch > 0 else 0)\n\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n 
self.optimizer.step()\n loss_value.append(loss.data.item())\n timer['model'] += self.split_time()\n\n value, predict_label = torch.max(output.data, 1)\n acc = torch.mean((predict_label == label.data).float())\n acc_value.append(acc.data.item())\n self.train_writer.add_scalar('acc', acc, self.global_step)\n self.train_writer.add_scalar('loss', loss.data.item(), self.global_step)\n self.train_writer.add_scalar('loss_l1', l1, self.global_step)\n\n # statistics\n self.lr = self.optimizer.param_groups[0]['lr']\n self.train_writer.add_scalar('lr', self.lr, self.global_step)\n timer['statistics'] += self.split_time()\n\n # statistics of time consumption and loss\n proportion = {\n k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))\n for k, v in timer.items()\n }\n self.print_log(\n '\\tMean training loss: {:.4f}. Mean training acc: {:.2f}%.'.format(np.mean(loss_value), np.mean(acc_value)*100))\n self.print_log(\n '\\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(\n **proportion))\n\n if save_model:\n state_dict = self.model.state_dict()\n weights = OrderedDict([[k.split('module.')[-1],\n v.cpu()] for k, v in state_dict.items()])\n\n torch.save(weights, self.arg.model_saved_name + '-' + str(epoch+1) + '-' + str(int(self.global_step)) + '.pt')\n\n def eval(self, epoch, save_score=False, loader_name=['test']):\n self.model.eval()\n self.print_log('Eval epoch: {}'.format(epoch + 1))\n for ln in loader_name:\n loss_value = []\n score_frag = []\n label_list = []\n pred_list = []\n step = 0\n process = tqdm(self.data_loader[ln], ncols=40)\n for batch_idx, (data, label, index) in enumerate(process):\n label_list.append(label)\n with torch.no_grad():\n data = data.float().cuda(self.output_device)\n label = label.long().cuda(self.output_device)\n output = self.model(data)\n if isinstance(output, tuple):\n output, l1 = output\n l1 = l1.mean()\n else:\n l1 = 0\n loss = self.loss(output, label)\n score_frag.append(output.data.cpu().numpy())\n loss_value.append(loss.data.item())\n\n _, predict_label = torch.max(output.data, 1)\n pred_list.append(predict_label.data.cpu().numpy())\n step += 1\n\n\n score = np.concatenate(score_frag)\n loss = np.mean(loss_value)\n accuracy = self.data_loader[ln].dataset.top_k(score, 1)\n if accuracy > self.best_acc:\n self.best_acc = accuracy\n self.best_acc_epoch = epoch + 1\n print('Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name)\n if self.arg.phase == 'train':\n self.val_writer.add_scalar('loss', loss, self.global_step)\n self.val_writer.add_scalar('loss_l1', l1, self.global_step)\n self.val_writer.add_scalar('acc', accuracy, self.global_step)\n\n score_dict = dict(\n zip(self.data_loader[ln].dataset.sample_name, score))\n self.print_log('\\tMean {} loss of {} batches: {}.'.format(\n ln, len(self.data_loader[ln]), np.mean(loss_value)))\n for k in self.arg.show_topk:\n self.print_log('\\tTop{}: {:.2f}%'.format(\n k, 100 * self.data_loader[ln].dataset.top_k(score, k)))\n\n if save_score:\n with open('{}/epoch{}_{}_score.pkl'.format(\n self.arg.work_dir, epoch + 1, ln), 'wb') as f:\n pickle.dump(score_dict, f)\n\n # acc for each class:\n label_list = np.concatenate(label_list)\n pred_list = np.concatenate(pred_list)\n confusion = confusion_matrix(label_list, pred_list)\n list_diag = np.diag(confusion)\n list_raw_sum = np.sum(confusion, axis=1)\n each_acc = list_diag / list_raw_sum\n with open('{}/epoch{}_{}_each_class_acc.csv'.format(self.arg.work_dir, epoch + 1, ln), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(each_acc)\n 
writer.writerows(confusion)\n\n def start(self):\n if self.arg.phase == 'train':\n self.print_log('Parameters:\\n{}\\n'.format(str(vars(self.arg))))\n self.global_step = self.arg.start_epoch * len(self.data_loader['train']) / self.arg.batch_size\n def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n self.print_log(f'# Parameters: {count_parameters(self.model)}')\n for epoch in range(self.arg.start_epoch, self.arg.num_epoch):\n if self.lr < 1e-3:\n break\n save_model = (((epoch + 1) % self.arg.save_interval == 0) or (\n epoch + 1 == self.arg.num_epoch)) and (epoch+1) > self.arg.save_epoch\n self.train(epoch, save_model=save_model)\n\n self.eval(\n epoch,\n save_score=self.arg.save_score,\n loader_name=['test'])\n\n num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)\n self.print_log(f'Best accuracy: {self.best_acc}')\n self.print_log(f'Epoch number: {self.best_acc_epoch}')\n self.print_log(f'Model name: {self.arg.work_dir}')\n self.print_log(f'Model total number of params: {num_params}')\n self.print_log(f'Weight decay: {self.arg.weight_decay}')\n self.print_log(f'Base LR: {self.arg.base_lr}')\n self.print_log(f'Batch Size: {self.arg.batch_size}')\n self.print_log(f'Test Batch Size: {self.arg.test_batch_size}')\n self.print_log(f'seed: {self.arg.seed}')\n\n elif self.arg.phase == 'test':\n\n if self.arg.weights is None:\n raise ValueError('Please appoint --weights.')\n self.arg.print_log = False\n self.print_log('Model: {}.'.format(self.arg.model))\n self.print_log('Weights: {}.'.format(self.arg.weights))\n self.eval(epoch=0, save_score=self.arg.save_score, loader_name=['test'])\n self.print_log('Done.\\n')\n\ndef import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n __import__(mod_str)\n try:\n return getattr(sys.modules[mod_str], class_str)\n except AttributeError:\n raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))\n\n\nif __name__ == '__main__':\n parser = get_parser()\n\n # load arg form config file\n p = parser.parse_args()\n if p.config is not None:\n with open(p.config, 'r') as f:\n default_arg = yaml.load(f)\n key = vars(p).keys()\n for k in default_arg.keys():\n if k not in key:\n print('WRONG ARG: {}'.format(k))\n assert (k in key)\n parser.set_defaults(**default_arg)\n\n arg = parser.parse_args()\n init_seed(arg.seed)\n processor = Processor(arg)\n processor.start()\n"
] | [
[
"numpy.concatenate",
"sklearn.metrics.confusion_matrix",
"numpy.array",
"torch.cuda.manual_seed_all",
"torch.max",
"numpy.random.seed",
"numpy.sum",
"torch.no_grad",
"numpy.mean",
"torch.manual_seed",
"torch.load",
"numpy.diag",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
] |
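The `eval()` method above derives per-class accuracy from a confusion matrix. The same computation in isolation, with a toy label set:

```python
# Per-class accuracy = diagonal of the confusion matrix / each row's total.
import numpy as np
from sklearn.metrics import confusion_matrix

labels = np.array([0, 0, 1, 1, 2, 2])
preds = np.array([0, 1, 1, 1, 2, 0])
confusion = confusion_matrix(labels, preds)
each_acc = np.diag(confusion) / np.sum(confusion, axis=1)
print(each_acc)  # [0.5 1.  0.5]
```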
senyang-ml/learning-to-learn-by-pytorch | [
"3d41e96d37045066ba38238b6a08733fd4afb125"
] | [
"project/optim.py"
] | [
"import torch\r\nUSE_CUDA = torch.cuda.is_available()\r\n\r\n\r\n###############################################################\r\n\r\n###################### 手工的优化器 ###################\r\n\r\ndef SGD(gradients, state, learning_rate=0.001):\r\n \r\n return -gradients*learning_rate, state\r\n\r\ndef RMS(gradients, state, learning_rate=0.01, decay_rate=0.9):\r\n if state is None:\r\n state = torch.zeros(gradients.size()[-1])\r\n if USE_CUDA == True:\r\n state = state.cuda()\r\n \r\n state = decay_rate*state + (1-decay_rate)*torch.pow(gradients, 2)\r\n update = -learning_rate*gradients / (torch.sqrt(state+1e-5))\r\n return update, state\r\n\r\ndef adam():\r\n return torch.optim.Adam()\r\n\r\n##########################################################"
] | [
[
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.sqrt",
"torch.pow"
]
] |
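A toy run of the RMS update rule above on f(x) = x², re-implemented locally so the snippet is self-contained (the constants mirror `RMS()` in the row):

```python
import torch

def rms_update(grad, state, lr=0.05, decay=0.9):
    # Same rule as RMS() above: an EMA of squared gradients scales the step.
    if state is None:
        state = torch.zeros_like(grad)
    state = decay * state + (1 - decay) * torch.pow(grad, 2)
    return -lr * grad / torch.sqrt(state + 1e-5), state

x = torch.tensor([3.0], requires_grad=True)
state = None
for _ in range(200):
    loss = (x ** 2).sum()
    loss.backward()
    with torch.no_grad():
        update, state = rms_update(x.grad, state)
        x += update
    x.grad.zero_()
print(x.item())  # ends within roughly one step size of 0
```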
lm-takumi-nakaso/blueoil | [
"6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6"
] | [
"blueoil/datasets/tfds.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nimport functools\nimport os\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nfrom blueoil.datasets.base import Base, ObjectDetectionBase, SegmentationBase\nfrom blueoil.utils.tfds_builders.classification import ClassificationBuilder\nfrom blueoil.utils.tfds_builders.object_detection import ObjectDetectionBuilder\nfrom blueoil.utils.tfds_builders.segmentation import SegmentationBuilder\n\n\ndef _grayscale_to_rgb(record):\n return {\n \"image\": tf.image.grayscale_to_rgb(record[\"image\"]),\n \"label\": record[\"label\"]\n }\n\n\ndef _format_classification_record(record, image_size, num_classes):\n image = tf.image.resize(record[\"image\"], image_size)\n label = tf.one_hot(record[\"label\"], num_classes)\n\n return {\"image\": image, \"label\": label}\n\n\ndef _format_object_detection_record(record, image_size, num_max_boxes):\n image = tf.image.resize(record[\"image\"], image_size)\n\n # Convert coordinates from relative to absolute\n ymin = tf.slice(record[\"objects\"][\"bbox\"], [0, 0], [-1, 1])\n xmin = tf.slice(record[\"objects\"][\"bbox\"], [0, 1], [-1, 1])\n ymax = tf.slice(record[\"objects\"][\"bbox\"], [0, 2], [-1, 1])\n xmax = tf.slice(record[\"objects\"][\"bbox\"], [0, 3], [-1, 1])\n\n ymin = tf.cast(ymin * image_size[0], tf.int64)\n xmin = tf.cast(xmin * image_size[1], tf.int64)\n ymax = tf.cast(ymax * image_size[0], tf.int64)\n xmax = tf.cast(xmax * image_size[1], tf.int64)\n\n height = ymax - ymin\n width = xmax - xmin\n\n # Combine boxes and labels\n label = tf.expand_dims(record[\"objects\"][\"label\"], axis=1)\n gt_boxes = tf.concat([xmin, ymin, width, height, label], axis=1)\n\n # Fill gt_boxes with dummy boxes\n dummy_boxes = tf.stack([tf.constant([0, 0, 0, 0, -1], tf.int64)] * num_max_boxes, axis=0)\n gt_boxes = tf.concat([gt_boxes, dummy_boxes], axis=0)\n gt_boxes = tf.slice(gt_boxes, [0, 0], [num_max_boxes, 5])\n\n return {\"image\": image, \"label\": gt_boxes}\n\n\ndef _format_segmentation_record(record, image_size):\n image = tf.image.resize(record[\"image\"], image_size)\n segmentation_mask = tf.squeeze(tf.image.resize(record[\"segmentation_mask\"], image_size), axis=2)\n\n return {\"image\": image, \"label\": segmentation_mask}\n\n\nclass TFDSMixin:\n \"\"\"A Mixin to compose dataset classes for TFDS.\"\"\"\n available_subsets = [\"train\", \"validation\"]\n extend_dir = None\n\n def __init__(\n self,\n name,\n data_dir,\n image_size,\n download=False,\n num_max_boxes=None,\n tfds_pre_processor=None,\n tfds_augmentor=None,\n *args,\n **kwargs\n ):\n super().__init__(\n *args,\n **kwargs,\n )\n\n if name in tfds.list_builders():\n self._builder = tfds.builder(name, data_dir=data_dir)\n if download:\n self._builder.download_and_prepare()\n else:\n if not tf.io.gfile.exists(os.path.join(data_dir, name)):\n raise ValueError(\"Dataset 
directory does not exist: {}\\n\"\n \"Please run `python blueoil/cmd/build_tfds.py -c <config file>` before training.\"\n .format(os.path.join(data_dir, name)))\n\n self._builder = self.builder_class(name, data_dir=data_dir)\n\n self.info = self._builder.info\n self._init_available_splits()\n self._validate_feature_structure()\n\n self.tf_dataset = self._builder.as_dataset(split=self.available_splits[self.subset])\n self.tfds_pre_processor = tfds_pre_processor\n self.tfds_augmentor = tfds_augmentor\n self._image_size = image_size\n self._num_max_boxes = num_max_boxes\n self._format_dataset()\n\n @property\n def num_per_epoch(self):\n split = self.available_splits[self.subset]\n return self.info.splits[split].num_examples\n\n @property\n def __getitem__(self, i):\n raise NotImplementedError()\n\n def __len__(self):\n return self.num_per_epoch\n\n def _init_available_splits(self):\n \"\"\"Initializing available splits dictionary depending on\n what kind of splits the dataset has.\n \"\"\"\n self.available_splits = {}\n if tfds.Split.TRAIN not in self.info.splits:\n raise ValueError(\"Datasets need to have a split \\\"TRAIN\\\".\")\n\n if tfds.Split.VALIDATION in self.info.splits and tfds.Split.TEST in self.info.splits:\n self.available_splits[\"train\"] = tfds.Split.TRAIN\n self.available_splits[\"validation\"] = tfds.Split.VALIDATION\n self.available_splits[\"test\"] = tfds.Split.TEST\n\n elif tfds.Split.VALIDATION in self.info.splits:\n self.available_splits[\"train\"] = tfds.Split.TRAIN\n self.available_splits[\"validation\"] = tfds.Split.VALIDATION\n\n elif tfds.Split.TEST in self.info.splits:\n self.available_splits[\"train\"] = tfds.Split.TRAIN\n self.available_splits[\"validation\"] = tfds.Split.TEST\n\n else:\n raise ValueError(\"Datasets need to have a split \\\"VALIDATION\\\" or \\\"TEST\\\".\")\n\n def _validate_feature_structure(self):\n \"\"\"Checking if the given dataset has a valid feature structure.\n\n This method will raise a ValueError if the structure is invalid.\n\n Args:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError()\n\n def _format_dataset(self):\n \"\"\"Converting the format of loaded dataset.\"\"\"\n raise NotImplementedError()\n\n\nclass TFDSClassification(TFDSMixin, Base):\n \"\"\"A dataset class for loading TensorFlow Datasets for classification.\n TensorFlow Datasets which have \"label\" and \"image\" features can be loaded by this class.\n \"\"\"\n builder_class = ClassificationBuilder\n\n @property\n def classes(self):\n return self.info.features[\"label\"].names\n\n @property\n def num_classes(self):\n return self.info.features[\"label\"].num_classes\n\n def _validate_feature_structure(self):\n is_valid = \\\n \"label\" in self.info.features and \\\n \"image\" in self.info.features and \\\n isinstance(self.info.features[\"label\"], tfds.features.ClassLabel) and \\\n isinstance(self.info.features[\"image\"], tfds.features.Image)\n\n if not is_valid:\n raise ValueError(\"Datasets should have \\\"label\\\" and \\\"image\\\" features.\")\n\n def _format_dataset(self):\n if self.info.features['image'].shape[2] == 1:\n self.tf_dataset = self.tf_dataset.map(\n _grayscale_to_rgb,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n self.tf_dataset = self.tf_dataset.map(\n lambda record: _format_classification_record(record, self._image_size, self.num_classes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n\nclass TFDSObjectDetection(TFDSMixin, ObjectDetectionBase):\n \"\"\"A dataset class for loading TensorFlow Datasets for object detection.\n 
TensorFlow Datasets which have \"objects\" and \"image\" features can be loaded by this class.\n \"\"\"\n builder_class = ObjectDetectionBuilder\n\n @classmethod\n @functools.lru_cache(maxsize=None)\n def count_max_boxes(cls, builder):\n sess = tf.compat.v1.Session()\n max_boxes = 0\n\n for split in builder.info.splits:\n tf_dataset = builder.as_dataset(split=split)\n iterator = tf.compat.v1.data.make_one_shot_iterator(tf_dataset)\n next_batch = iterator.get_next()\n\n while True:\n try:\n data = sess.run(next_batch)\n if max_boxes < data[\"objects\"][\"label\"].shape[0]:\n max_boxes = data[\"objects\"][\"label\"].shape[0]\n except tf.errors.OutOfRangeError:\n break\n\n return max_boxes\n\n @property\n def classes(self):\n return self.info.features[\"objects\"][\"label\"].names\n\n @property\n def num_classes(self):\n return self.info.features[\"objects\"][\"label\"].num_classes\n\n @property\n def num_max_boxes(self):\n if self._num_max_boxes is None:\n self._num_max_boxes = self.__class__.count_max_boxes(self._builder)\n\n return self._num_max_boxes\n\n def _validate_feature_structure(self):\n is_valid = \\\n \"image\" in self.info.features and \\\n \"objects\" in self.info.features and \\\n \"label\" in self.info.features[\"objects\"].feature and \\\n \"bbox\" in self.info.features[\"objects\"].feature and \\\n isinstance(self.info.features[\"image\"], tfds.features.Image) and \\\n isinstance(self.info.features[\"objects\"], tfds.features.Sequence) and \\\n isinstance(self.info.features[\"objects\"][\"label\"], tfds.features.ClassLabel) and \\\n isinstance(self.info.features[\"objects\"][\"bbox\"], tfds.features.BBoxFeature)\n\n if not is_valid:\n raise ValueError(\"Datasets should have \\\"objects\\\" and \\\"image\\\" features and \"\n \"\\\"objects\\\" should be a Sequence containing \\\"label\\\" and \\\"bbox\\\".\")\n\n def _format_dataset(self):\n if self.info.features['image'].shape[2] == 1:\n self.tf_dataset = self.tf_dataset.map(\n _grayscale_to_rgb,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n # self.num_max_boxes should be evaluated before executing lambda function.\n num_max_boxes = self.num_max_boxes\n\n self.tf_dataset = self.tf_dataset.map(\n lambda record: _format_object_detection_record(record, self._image_size, num_max_boxes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n\nclass TFDSSegmentation(TFDSMixin, SegmentationBase):\n \"\"\"A dataset class for loading TensorFlow Datasets for segmentation.\n TensorFlow Datasets which have \"label\" and \"image\" features can be loaded by this class.\n \"\"\"\n builder_class = SegmentationBuilder\n\n @property\n def classes(self):\n return self.info.features[\"label\"].names\n\n @property\n def num_classes(self):\n return self.info.features[\"label\"].num_classes\n\n def _validate_feature_structure(self):\n is_valid = \\\n \"label\" in self.info.features and \\\n \"image\" in self.info.features and \\\n \"segmentation_mask\" in self.info.features and \\\n isinstance(self.info.features[\"label\"], tfds.features.ClassLabel) and \\\n isinstance(self.info.features[\"image\"], tfds.features.Image) and \\\n isinstance(self.info.features[\"segmentation_mask\"], tfds.features.Image)\n\n if not is_valid:\n raise ValueError(\"Datasets should have \\\"label\\\", \\\"image\\\" and \\\"segmentation_mask\\\" features.\")\n\n def _format_dataset(self):\n if self.info.features['image'].shape[2] == 1:\n self.tf_dataset = self.tf_dataset.map(\n _grayscale_to_rgb,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n 
)\n\n self.tf_dataset = self.tf_dataset.map(\n lambda record: _format_segmentation_record(record, self._image_size),\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n"
] | [
[
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.concat",
"tensorflow.expand_dims",
"tensorflow.one_hot",
"tensorflow.compat.v1.Session",
"tensorflow.constant",
"tensorflow.slice",
"tensorflow.image.resize",
"tensorflow.cast",
"tensorflow.image.grayscale_to_rgb"
]
] |
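`_format_object_detection_record` above pads each image's box list to a fixed `num_max_boxes` with `[0, 0, 0, 0, -1]` rows. That padding trick in isolation (the boxes are invented):

```python
import tensorflow as tf

num_max_boxes = 4
gt_boxes = tf.constant([[10, 20, 30, 40, 1],
                        [5, 5, 8, 8, 0]], tf.int64)  # [x, y, w, h, label]
dummy = tf.stack([tf.constant([0, 0, 0, 0, -1], tf.int64)] * num_max_boxes, axis=0)
# Append dummies, then slice back to exactly num_max_boxes rows.
padded = tf.slice(tf.concat([gt_boxes, dummy], axis=0), [0, 0], [num_max_boxes, 5])
print(padded.numpy())  # 2 real boxes followed by 2 dummy rows
```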
foamliu/Face-Recognition | [
"953a949722da04a3d36fdd028f3c786e89d65ed3"
] | [
"demo.py"
] | [
"import json\n\nimport cv2 as cv\nimport dlib\nimport imutils\nimport keras.backend as K\nimport numpy as np\nfrom imutils import face_utils\nfrom keras.applications.inception_resnet_v2 import preprocess_input\nfrom keras.models import load_model\n\nif __name__ == '__main__':\n img_size = 139\n model = load_model('models/model.10-0.0156.hdf5')\n detector = dlib.get_frontal_face_detector()\n image_inputs = np.empty((1, img_size, img_size, 3), dtype=np.float32)\n dummy_input = np.zeros((1, img_size, img_size, 3), dtype=np.float32)\n\n filename = 'images/foamliu.png'\n image = cv.imread(filename)\n image = imutils.resize(image, width=500)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n rects = detector(gray, 1)\n (x, y, w, h) = face_utils.rect_to_bb(rects[0])\n image = image[y:y + h, x:x + w]\n image = cv.resize(image, (img_size, img_size))\n image = image[:, :, ::-1].astype(np.float32)\n image_inputs[0] = preprocess_input(image)\n y_pred = model.predict([image_inputs, dummy_input, dummy_input])\n e1 = y_pred[0, 0:128]\n\n with open('data/results.json', 'r') as file:\n embeddings = json.load(file)\n\n distances = []\n for e in embeddings:\n e2 = e['embedding']\n distance = np.linalg.norm(e1 - e2) ** 2\n distances.append(distance)\n\n index = np.argmin(distances)\n print(embeddings[index]['alias'])\n\n for i in range(len(embeddings)):\n print('alias: {} distance:{}'.format(embeddings[i]['alias'], distances[i]))\n\n K.clear_session()\n"
] | [
[
"numpy.linalg.norm",
"numpy.empty",
"numpy.zeros",
"numpy.argmin"
]
] |
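`demo.py` ranks the gallery by squared Euclidean distance between 128-d embeddings in a Python loop. A hedged sketch (not part of the repo) of the same lookup vectorized with NumPy; the helper name and toy data are illustrative:

```python
import numpy as np

def nearest_alias(query, gallery):
    """query: (128,) embedding; gallery: list of {'alias': str, 'embedding': list}."""
    embs = np.array([g["embedding"] for g in gallery])   # (N, 128)
    d2 = np.sum((embs - query) ** 2, axis=1)             # squared L2, as in the demo
    i = int(np.argmin(d2))
    return gallery[i]["alias"], float(d2[i])

# Toy example:
gallery = [{"alias": "a", "embedding": np.zeros(128)},
           {"alias": "b", "embedding": np.ones(128)}]
print(nearest_alias(np.full(128, 0.9), gallery))  # ('b', ...)
```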
manuhuth/Replication-of-Bailey-2010- | [
"dbf1cd6c1463d52b569bd2e7ce3ae4624451b422"
] | [
"auxiliary/ext.py"
] | [
"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\n\r\ndef sim_start_val1(models, df, cluster_by, meth ='newton', numb_it = '100', up_bound = 1, low_bound = -1):\r\n \"\"\"Function to create different parameter values for random starting values of the models used in section 5.\r\n \r\n Args:\r\n models: list of models to evaluate the parameters for\r\n meth: optimization method, default is Newton-Raphson\r\n numb_it: number of draws\r\n up/low_bound: upper and lower bounds for random draw of starting values from a uniform distribution\r\n \r\n Returns:\r\n -------\r\n A data frame containing the results for both parameter\r\n \"\"\"\r\n df_results = pd.DataFrame()\r\n model_num = 0\r\n for u in models:\r\n model_num = model_num + 1\r\n n_params = u.exog.shape[1]\r\n par_sales = []\r\n par_interact = []\r\n\r\n for v in range(0,numb_it):\r\n start = np.random.uniform(low_bound, up_bound, n_params)\r\n fit = u.fit(cov_type='cluster', cov_kwds={'groups': df[cluster_by]}, disp = False, start_params = start, method = meth)\r\n margins = fit.get_margeff()\r\n par_sales.append(margins.margeff[0])\r\n par_interact.append(margins.margeff[2]) \r\n df_results['sales_model{}'.format(model_num)] = par_sales\r\n df_results['interact_1970_model{}'.format(model_num)] = par_interact\r\n return df_results\r\n\r\n\r\ndef sim_start_val2(models, df, cluster_by, meth ='newton', numb_it = '100', up_bound = 1, low_bound = -1):\r\n \"\"\"Function to create different parameter values for random starting values of the models used in section 5.\r\n \r\n Args:\r\n models: list of models to evaluate the parameters for\r\n meth: optimization method, default is Newton-Raphson\r\n numb_it: number of draws\r\n up/low_bound: upper and lower bounds for random draw of starting values from a uniform distribution\r\n \r\n Returns:\r\n -------\r\n A data frame containing the results for the three parameter\r\n \"\"\"\r\n df_results = pd.DataFrame()\r\n model_num = 0\r\n for u in models:\r\n model_num = model_num + 1\r\n n_params = u.exog.shape[1]\r\n par_sales = []\r\n par_interact_1970 = []\r\n par_interact_1965 = []\r\n\r\n for v in range(0,numb_it):\r\n start = np.random.uniform(low_bound, up_bound, n_params)\r\n fit = u.fit(cov_type='cluster', cov_kwds={'groups': df[cluster_by]}, disp = False, start_params = start, method = meth)\r\n margins = fit.get_margeff()\r\n par_sales.append(margins.margeff[0])\r\n par_interact_1970.append(margins.margeff[3]) \r\n par_interact_1965.append(margins.margeff[4]) \r\n df_results['sales_model{}'.format(model_num)] = par_sales\r\n df_results['interact_1970_model{}'.format(model_num)] = par_interact_1970\r\n df_results['interact_1965_model{}'.format(model_num)] = par_interact_1965\r\n return df_results"
] | [
[
"pandas.DataFrame",
"numpy.random.uniform"
]
] |
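Both helpers refit each model from random uniform starting values and record the resulting marginal effects, which requires `numb_it` to be an integer for `range`. A minimal self-contained sketch of that restart pattern on a toy Logit, assuming statsmodels is installed (all names and data here are illustrative):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(500, 2)))
y = (X @ np.array([0.2, 1.0, -0.5]) + rng.logistic(size=500) > 0).astype(int)
model = sm.Logit(y, X)

draws = []
for _ in range(10):                                    # numb_it restarts
    start = rng.uniform(-1, 1, X.shape[1])             # random starting values
    fit = model.fit(start_params=start, method="newton", disp=False)
    draws.append(fit.get_margeff().margeff[0])         # marginal effect of x1

print(np.std(draws))  # ~0 when every restart reaches the same optimum
```

With a well-behaved likelihood the spread across restarts is essentially zero, which is exactly the robustness the two helpers probe.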
ArturoDeza/EmergentProperties | [
"c3f0c4a4064c03b12e89f79ddbf6104736ccf231"
] | [
"Square_Cue_Conflict/plot_Square_curves.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom shutil import copyfile\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import stats, optimize, interpolate\nimport math\n\n###############################\n# Trained on Both Distortions #\n###############################\n\n#Model_Type = 'AlexNet'\nModel_Type = 'ResNet18'\nnumber_runs = 10\nTop_k = 1\n#epoch_real = 70\n#epoch_real = 60\n#epoch_real = 0\n#epoch_real = 1\n#epoch_real = 5\n#epoch_real = 30\n#epoch_real = 70\n#epoch_real = 120\n#epoch_real = 0\n#epoch_real = 1\n#epoch_real = 4\n#epoch_real = 20\n#epoch_real = 60\nepoch_real = 80\n\nepoch_real_str = str(epoch_real)\nnumber_runs_str = str(number_runs)\n\nepoch_equivalent = epoch_real\nepoch_equivalent_str = str(epoch_equivalent)\n\nif Top_k == 1:\n\tUpper_lim = 80\nelse:\n\tUpper_lim = 100\n\nTop_k_str = str(Top_k)\n\nNetwork_Arch_Names_in = ['ResNet18','AlexNet','VGG11']\nNetwork_Arch_Names_out = ['ResNet18','AlexNet','VGG11']\n\nNetwork_Name_Mapping = dict(zip(Network_Arch_Names_in,Network_Arch_Names_out))\n\n#Training_Regimes = ['Foveation-Texture-Net','Foveation-Blur-Net']\n#Training_Regimes = ['Reference-Net','Uniform-Net']\nTraining_Regimes = ['Reference-Net','Foveation-Texture-Net','Uniform-Net','Foveation-Blur-Net']\n\nAll_Regimes = ['Reference-Net','Foveation-Texture-Net','Uniform-Net','Foveation-Blur-Net']\n\nColor_Scheme = ['darkorange','navy','orchid','seagreen']\nAlpha_Values = [1.0,1.0,1.0,1.0]\nLine_Values = ['--','-','-','-']\n\nColor_Legend = dict(zip(All_Regimes,Color_Scheme))\nAlpha_Legend = dict(zip(All_Regimes,Alpha_Values))\nLine_Legend = dict(zip(All_Regimes,Line_Values))\n\nName_Regimes = ['Reference-Net','Foveation-Texture-Net','Uniform-Net','Foveation-Blur-Net']\n\nTraining_Name_Legend = dict(zip(All_Regimes,Name_Regimes))\n\n\nExperiment_name_mod = ['Square_Uniform_Conflict']\n\nExperiment_name_mod_list = ''\n\nfor i in range(len(Experiment_name_mod)):\n Experiment_name_mod_list = Experiment_name_mod_list + '_' + Experiment_name_mod[i]\n\nTraining_Regimes_list = ''\n\nfor i in range(len(Training_Regimes)):\n Training_Regimes_list = Training_Regimes_list + '_' + Training_Regimes[i]\n\n\nExperiment_name_in = ['Square_Uniform_Conflict']\nExperiment_name_out = ['Square_Uniform_Conflict']\n\nExperiment_Name_Legend = dict(zip(Experiment_name_in,Experiment_name_out))\n\nAcc_Regimes_Periphery = np.zeros((len(Training_Regimes),number_runs,len(Experiment_name_mod),17))\nAcc_Regimes_Fovea = np.zeros((len(Training_Regimes),number_runs,len(Experiment_name_mod),17))\n\nfor run_id in range(1,number_runs+1):\n\trun_id_str = str(run_id)\n\tfor regime in range(len(Training_Regimes)):\n\t\tfor exp in range(len(Experiment_name_mod)):\n\t\t\tfor level in range(1,18):\n\t\t\t\tlevel_str = str(level)\n\t\t\t\tfile_periphery = './Results/Top' + Top_k_str + '/' + Training_Regimes[regime] + '/' + Model_Type + '_' + run_id_str + '_class_periphery_Object_' + level_str + '_Epoch_' + epoch_real_str + '.npy'\n\t\t\t\tfile_fovea = './Results/Top' + Top_k_str + '/' + Training_Regimes[regime] + '/' + Model_Type + '_' + run_id_str + '_class_fovea_Object_' + level_str + '_Epoch_' + epoch_real_str + '.npy'\n\t\t\t\tfile_total = './Results/Top' + Top_k_str + '/'+ Training_Regimes[regime] + '/' + Model_Type + '_' + run_id_str + '_class_total_Object_' + level_str + '_Epoch_' + epoch_real_str + '.npy'\n\t\t\t\t# Get Scores\n\t\t\t\tperiphery_vec = np.load(file_periphery)\n\t\t\t\tfovea_vec = np.load(file_fovea)\n\t\t\t\ttotal_vec = np.load(file_total)\n\t\t\t\t# Compute 
Accuracy\n\t\t\t\tAcc_Regimes_Periphery[regime,run_id-1,exp,level-1] = float(np.sum(periphery_vec))/float(np.sum(total_vec))*100.0\n\t\t\t\tAcc_Regimes_Fovea[regime,run_id-1,exp,level-1] = float(np.sum(fovea_vec))/float(np.sum(total_vec))*100.0\n\nAcc_Regimes_Periphery_mean = np.mean(Acc_Regimes_Periphery,1)\nAcc_Regimes_Periphery_std = np.std(Acc_Regimes_Periphery,1)\n\nAcc_Regimes_Fovea_mean = np.mean(Acc_Regimes_Fovea,1)\nAcc_Regimes_Fovea_std = np.std(Acc_Regimes_Fovea,1)\n\nx = np.linspace(0,1,17)\n\n# Compute all Cross-Over Points:\ndef cross_over(Array_Per,Array_Fov):\n\tcross_array = np.empty(len(Array_Per))\n\tfor z in range(len(Array_Per)):\n\t\tx_Mid = math.nan\n\t\tfor i in range(16):\n\t\t\tif Array_Per[z,0][i]>Array_Fov[z,0][i] and Array_Per[z,0][i+1]<Array_Fov[z,0][i+1]:\n\t\t\t\tPoint_Per_y = np.array((Array_Per[z,0][i],Array_Per[z,0][i+1]))\n\t\t\t\tPoint_Per_x = np.array((x[i],x[i+1]))\n\t\t\t\t#\n\t\t\t\tm_Per = (Point_Per_y[1]-Point_Per_y[0])/(Point_Per_x[1]-Point_Per_x[0])\n\t\t\t\tb_Per = Point_Per_y[0]-m_Per*Point_Per_x[0]\n\t\t\t\t#\n\t\t\t\tPoint_Fov_y = np.array((Array_Fov[z,0][i],Array_Fov[z,0][i+1]))\n\t\t\t\tPoint_Fov_x = np.array((x[i],x[i+1]))\n\t\t\t\t#\n\t\t\t\tm_Fov = (Point_Fov_y[1]-Point_Fov_y[0])/(Point_Fov_x[1]-Point_Fov_x[0])\n\t\t\t\tb_Fov = Point_Fov_y[0]-m_Fov*Point_Fov_x[0]\n\t\t\t\t# Now compute the cross-over points:\n\t\t\t\tx_Mid = (b_Fov-b_Per)/(m_Per-m_Fov)\n\t\t\t\tbreak\n\t\tcross_array[z] = x_Mid\n\treturn cross_array\n\nArray_Per = Acc_Regimes_Periphery_mean\nArray_Fov = Acc_Regimes_Fovea_mean\n\nprint('----------------------')\nprint(cross_over(Array_Per,Array_Fov))\nprint('----------------------')\n\n##################\n# Use Matplotlib #\n##################\n\n# Define x-ticks:\n#x = np.array([0,1.55,3.43,7.59,16.8,37.18,80.77,100])/100 # These values pre-computed from the area ratio of the log-polar pooling windows\nx = np.linspace(0,1,17)\n\ndef compute_Area(Acc_Matrix,regime,run_id,exp,links):\n\tArea_Total = 0.0\n\tnum_points = 17\n\tfor i in range(1,num_points-1):\n\t\tArea_i = Acc_Matrix[regime,run_id,exp,i]*np.abs(links[i+1]-links[i])-(Acc_Matrix[regime,run_id,exp,i+1]-Acc_Matrix[regime,run_id,exp,i])*np.abs(links[i+1]-links[i])/2.0\n\t\tArea_Total = Area_Total + Area_i\n\treturn 100*Area_Total\n\nfig,axs = plt.subplots(1, len(Experiment_name_mod), figsize=(8, 7.5),squeeze=False)\n#fix,axs = plt.subplots(2, 4, figsize=(24, 15),squeeze=False)\nfig.suptitle(Model_Type)\n\nfig.suptitle(Network_Name_Mapping[Model_Type] + ' Robustness to Window @ Epoch ' + epoch_equivalent_str )\n\nfor i in range(len(Experiment_name_mod)):\n\tyerr_total_Periphery = np.zeros((len(Training_Regimes),2,17))\n\t#\n\tfor j in range(len(Training_Regimes)):\n\t\tyerr_total_Periphery[j,0,:] = Acc_Regimes_Periphery_std[j,i,:]\n\t\tyerr_total_Periphery[j,1,:] = Acc_Regimes_Periphery_std[j,i,:]\n\t#\n\tx_i,y_i = int(i/len(Experiment_name_mod)), i% len(Experiment_name_mod)\n\t#\n\tfor j in range(len(Training_Regimes)):\n\t\taxs[x_i,y_i].errorbar(x,Acc_Regimes_Periphery_mean[j,i,:],yerr=yerr_total_Periphery[j,:,:],markersize=10,marker='o',\n\t\t\t\t\t\t\t color=Color_Legend[\n\t\t\tTraining_Regimes[j]],alpha=Alpha_Legend[Training_Regimes[j]],linestyle=Line_Legend[Training_Regimes[j]],linewidth=2)\n\t#\n\taxs[x_i,y_i].legend([Training_Name_Legend[Training_Regimes[z]] for z in range(len(Training_Regimes))])\n\tyerr_total_Fovea = np.zeros((len(Training_Regimes),2,17))\n\t#\n\tfor j in range(len(Training_Regimes)):\n\t\tyerr_total_Fovea[j,0,:] = 
Acc_Regimes_Fovea_std[j,i,:]\n\t\tyerr_total_Fovea[j,1,:] = Acc_Regimes_Fovea_std[j,i,:]\n\t#\n\tx_i,y_i = int(i/len(Experiment_name_mod)), i% len(Experiment_name_mod)\n\t#\n\tfor j in range(len(Training_Regimes)):\n\t\taxs[x_i,y_i].errorbar(x,Acc_Regimes_Fovea_mean[j,i,:],yerr=yerr_total_Fovea[j,:,:],markersize=10,marker='*',\n\t\t\t\t\t\t\t color=Color_Legend[\n\t\t\tTraining_Regimes[j]],alpha=Alpha_Legend[Training_Regimes[j]],linestyle=Line_Legend[Training_Regimes[j]],linewidth=2)\n\t#\n\taxs[x_i,y_i].plot(x,np.repeat(Top_k/20.0*100.0,17),'--y',linewidth=2)\n\taxs[x_i,y_i].set(xlim=(-0.1,1.1),ylim=(0,Upper_lim))\n\taxs[x_i,y_i].set_title(Experiment_Name_Legend[Experiment_name_mod[i]])\n\taxs[x_i,y_i].set_xticks(np.arange(0,1.1,step=0.1))\n\taxs[x_i,y_i].set_ylabel('(\"o\") Peripheral Scene Classification Accuracy (%)')\n\tax2 = axs[x_i,y_i].twinx() # instantiate a second axes that shares the same x-axis\n\tax2.set_ylabel('(\"*\") Foveal Scene Classification Accuracy (%)')\n\tax2.set(xlim=(-0.1,1.1),ylim=(0,Upper_lim))\n\taxs[x_i,y_i].set_xlabel('Percentage of Central Image Area with Foveal Class')\n\n\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\nplt.savefig('./Plots/' + Model_Type + '/Top_' + Top_k_str + '_Epoch_' + epoch_real_str + Experiment_name_mod_list + Training_Regimes_list + '_Runs_' + number_runs_str + '.png')\nplt.savefig('./Plots/' + Model_Type + '/Top_' + Top_k_str + '_Epoch_' + epoch_real_str + Experiment_name_mod_list + Training_Regimes_list + '_Runs_' + number_runs_str +'.svg')\n\nplt.show()\n\n\n########################\n# Use Individual Plots #\n########################\n\n# Define x-ticks:\nx = np.linspace(0,1,17)\n\nfig,axs = plt.subplots(1, len(Experiment_name_mod), figsize=(8, 7.5),squeeze=False)\n#fix,axs = plt.subplots(2, 4, figsize=(24, 15),squeeze=False)\nfig.suptitle(Model_Type)\n\nfig.suptitle(Network_Name_Mapping[Model_Type] + ' Robustness to Window @ Epoch ' + epoch_equivalent_str )\n\nfor i in range(len(Experiment_name_mod)):\n\tx_i,y_i = int(i/len(Experiment_name_mod)), i% len(Experiment_name_mod)\n\t#\n\tfor z in range(number_runs):\n\t\tfor j in range(len(Training_Regimes)):\n\t\t\taxs[x_i,y_i].plot(x,Acc_Regimes_Periphery[j,z,i,:],markersize=10,marker='o',color=Color_Legend[Training_Regimes[j]],alpha=0.5,linestyle=Line_Legend[Training_Regimes[j]],linewidth=1)\n\t#\n\taxs[x_i,y_i].legend([Training_Name_Legend[Training_Regimes[z]] for z in range(len(Training_Regimes))])\n\t#\n\tx_i,y_i = int(i/len(Experiment_name_mod)), i% len(Experiment_name_mod)\n\t#\n\tfor z in range(number_runs):\n\t\tfor j in range(len(Training_Regimes)):\n\t\t\taxs[x_i,y_i].plot(x,Acc_Regimes_Fovea[j,z,i,:],markersize=10,marker='*', color=Color_Legend[Training_Regimes[j]],alpha=Alpha_Legend[Training_Regimes[j]],linestyle=Line_Legend[Training_Regimes[j]],linewidth=2)\n\t\t\tprint(cross_over(Acc_Regimes_Periphery[:,z,:,:],Acc_Regimes_Fovea[:,z,:,:]))\n\t#\n\taxs[x_i,y_i].plot(x,np.repeat(Top_k/20.0*100.0,17),'--y',linewidth=2)\n\taxs[x_i,y_i].set(xlim=(-0.1,1.1),ylim=(0,Upper_lim))\n\taxs[x_i,y_i].set_title(Experiment_Name_Legend[Experiment_name_mod[i]])\n\taxs[x_i,y_i].set_xticks(np.arange(0,1.1,step=0.1))\n\taxs[x_i,y_i].set_ylabel('(\"o\") Peripheral Scene Classification Accuracy (%)')\n\tax2 = axs[x_i,y_i].twinx() # instantiate a second axes that shares the same x-axis\n\tax2.set_ylabel('(\"*\") Foveal Scene Classification Accuracy (%)')\n\tax2.set(xlim=(-0.1,1.1),ylim=(0,Upper_lim))\n\taxs[x_i,y_i].set_xlabel('Percentage of Central Image Area with Foveal 
Class')\n\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\nplt.savefig('./Plots_Individual/' + Model_Type + '/Top_' + Top_k_str + '_Epoch_' + epoch_real_str + Experiment_name_mod_list + Training_Regimes_list + '_Runs_' + number_runs_str + '.png')\nplt.savefig('./Plots_Individual/' + Model_Type + '/Top_' + Top_k_str + '_Epoch_' + epoch_real_str + Experiment_name_mod_list + Training_Regimes_list + '_Runs_' + number_runs_str + '.svg')\n\nplt.show()\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.savefig",
"numpy.sum",
"numpy.load",
"numpy.mean",
"numpy.std",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"numpy.repeat",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
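`cross_over()` above locates where the mean periphery curve first drops below the fovea curve by intersecting the two straight segments between the bracketing samples. A standalone restatement of that interpolation step (synthetic curves, for illustration only):

```python
import numpy as np

def crossing_x(x, per, fov):
    """x, per, fov: 1-D arrays; returns the interpolated crossing point or nan."""
    for i in range(len(x) - 1):
        if per[i] > fov[i] and per[i + 1] < fov[i + 1]:
            m_p = (per[i + 1] - per[i]) / (x[i + 1] - x[i])
            m_f = (fov[i + 1] - fov[i]) / (x[i + 1] - x[i])
            b_p = per[i] - m_p * x[i]
            b_f = fov[i] - m_f * x[i]
            return (b_f - b_p) / (m_p - m_f)   # intersection of the two segments
    return float("nan")

x = np.linspace(0, 1, 17)
print(crossing_x(x, 80 - 60 * x, 20 + 55 * x))  # crosses near x ~ 0.52
```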
4ndrebar/2dGas | [
"83aa516426efb73f70f10e67879036449f22f1c9"
] | [
"2dgas.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 27 14:42:59 2020\r\nMicroscopic simulaiton of a 2d ideal gas in a fixed volume box.\r\n\r\nThe class swarm is composed by particle objects. \r\nIt has a interaction functions that iterates through particle pairs\r\nchecks for collisions and updates velocities\r\n\r\nISSUES:\r\n - slow rendering (matplotlib bottleneck) \r\n - parallelization for collision check could help\r\n\r\nhttps://gist.github.com/schirrecker/982847faeea703dd6f1dd8a09eab13aa\r\nhttps://nickcharlton.net/posts/drawing-animating-shapes-matplotlib.html\r\n@author: AB\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random as rnd\r\nimport matplotlib.animation as animation\r\nimport itertools\r\n\r\nclass Particle:\r\n \r\n def __init__(self,mass=1,radius=1,v=False): \r\n \"\"\"\r\n :param xy: Initial position.\r\n :param v: Initial velocity.\r\n \"\"\"\r\n self.xy=np.array([rnd.uniform(0,size),rnd.uniform(0,size)])\r\n if v==False:\r\n self.v=np.array([rnd.uniform(-0.5,0.5),rnd.uniform(-0.5,0.5)])\r\n else:\r\n self.v=v\r\n # self.v=np.array([0.5,0.5])\r\n self.mass=mass\r\n self.radius=radius\r\n # self.radius=mass #if the density is assumed constant\r\n \r\n # circle = plt.Circle(self.xy, self.radius, color='blue')\r\n # self.scatter,=circle\r\n @property\r\n def energy(self):\r\n return 0.5*np.linalg.norm(self.v)**2*self.mass\r\n @property\r\n def vel(self):\r\n return np.linalg.norm(self.v)\r\n \r\n @classmethod \r\n def create_swarm(cls,n,fix_mass=True,fix_radius=True, only_one_fast=False, all_right=False ): \r\n \"\"\"\r\n generates a list particles \r\n \r\n if fix_mass is True -> mass=1 by default\r\n if fix_radius is True -> radius=1 by default\r\n \r\n \"\"\"\r\n swarm=[]\r\n for i in range(n):\r\n if fix_mass and fix_radius:\r\n swarm.append(Particle()) \r\n elif fix_mass:\r\n swarm.append(Particle(radius=rnd.uniform(1,max_radius))) \r\n elif fix_radius:\r\n swarm.append(Particle(mass=rnd.uniform((1,max_mass))))\r\n if only_one_fast:\r\n for i in range(n):\r\n if i==0:\r\n swarm[i].v=np.array([10.,10.])\r\n else:\r\n swarm[i].v=np.array([.0,.0])\r\n if only_one_fast: #ignored if onlyonefast is true\r\n pass\r\n elif all_right:\r\n for i in range(n):\r\n swarm[i].v=np.array([1.,.0])\r\n \r\n return swarm\r\n \r\n def pop(self):\r\n return plt.Circle(tuple(self.xy), self.radius)\r\n @staticmethod \r\n def dist(ball1,ball2):\r\n dist=np.linalg.norm(ball1.xy-ball2.xy)\r\n return dist\r\n \r\n @staticmethod\r\n def collision(ball1,ball2): \r\n r12=ball1.xy-ball2.xy\r\n v12=-ball1.v+ball2.v\r\n r_vers=r12/np.linalg.norm(r12)\r\n v_vers=v12/np.linalg.norm(v12)\r\n if np.dot(v_vers,r_vers)<0: \r\n #if particles are merged pass\r\n #avoidss initialization issues\r\n pass\r\n else:\r\n #exchanged momentum q\r\n #collision solved in the frame of reference of ball2 \r\n q=-cor*2*(ball1.mass*ball2.mass)/(ball1.mass+ball2.mass)*(np.dot(-v12,r_vers))*r_vers \r\n ball1.v+=q/ball1.mass\r\n ball2.v-=q/ball2.mass\r\n \r\n \r\n def update(self):\r\n if self.xy[0] <= xlim[0]+self.radius:\r\n # hit the left wall, reflect x component\r\n self.v[0] = cor * np.abs(self.v[0])\r\n elif self.xy[0] >= xlim[1]-self.radius:\r\n self.v[0] = - cor * np.abs(self.v[0])\r\n if self.xy[1] <= ylim[0]+self.radius:\r\n # hit the left wall, reflect y component\r\n self.v[1] = cor * np.abs(self.v[1])\r\n elif self.xy[1] >= ylim[1]-self.radius:\r\n self.v[1] = - cor * np.abs(self.v[1])\r\n\r\n # delta t is 0.1\r\n delta_v = delta_t * ag\r\n self.v += 
delta_v\r\n\r\n        self.xy += self.v\r\n\r\n        self.xy[0] = np.clip(self.xy[0], xlim[0]+self.radius, xlim[1]-self.radius)\r\n        self.xy[1] = np.clip(self.xy[1], ylim[0]+self.radius, ylim[1]-self.radius)\r\n\r\n        self.pop()\r\n\r\n\r\ndef init():\r\n\r\n    velocities=np.array([])\r\n    energies=np.array([])\r\n\r\n    for ball in balls:\r\n        velocities=np.append(velocities,ball.vel)\r\n        energies=np.append(energies,ball.energy)\r\n    axs[1].hist(energies)\r\n    axs[2].hist(velocities)\r\n\r\n    return []\r\n\r\n\r\ndef animate(t):\r\n    # t is time in seconds\r\n\r\n    velocities=energies=np.array([])\r\n    for ball in balls:\r\n        ball.update()\r\n        velocities=np.append(velocities,ball.vel)\r\n        energies=np.append(energies,ball.energy)\r\n\r\n    for ball1, ball2 in itertools.combinations(balls,2):\r\n        if Particle.dist(ball1,ball2)<ball1.radius+ball2.radius: #check for collision only if contact\r\n            Particle.collision(ball1, ball2)\r\n        else:\r\n            pass\r\n    axs[0].clear()\r\n    plt.sca(axs[0])\r\n    plt.xticks([],[])\r\n    plt.yticks([],[])\r\n    axs[0].set_xlim(xlim)\r\n    axs[0].set_ylim(ylim)\r\n    axs[2].cla()\r\n    axs[1].cla()\r\n    axs[1].hist(energies,bins=int(n_particles/3),density=True)\r\n    axs[2].hist(velocities,bins=int(n_particles/3),density=True)\r\n    axs[2].set_xlabel(\"Speed\")\r\n    axs[1].set_xlabel(\"Energy\")\r\n\r\n    [axs[0].add_patch(ball.pop()) for ball in balls]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    ###setting the experiment\r\n    n_particles=300\r\n    g = 0 #gravity\r\n    ag = np.array((0,-g))\r\n    cor = 1 # coefficient of restitution (ratio of velocity after and before bounce)\r\n    size=150 #box size\r\n\r\n    max_mass=1\r\n    max_radius=5\r\n    # bounds of the box\r\n    xlim = (0,size)\r\n    ylim = (0,size)\r\n\r\n    delta_t = 0.001 # 1 millisecond delta t\r\n\r\n    fig , axs= plt.subplots(3,1)\r\n    fig.set_dpi(100)\r\n    fig.set_size_inches(7,21)\r\n\r\n    plt.sca(axs[0])\r\n    plt.xticks([],[])\r\n    plt.yticks([],[])\r\n    axs[0].set_xlim(xlim)\r\n    axs[0].set_ylim(ylim)\r\n\r\n    balls = Particle.create_swarm(n_particles,all_right=True)\r\n    ani = animation.FuncAnimation(fig, animate, frames=np.arange(0,1,delta_t),\r\n                                  init_func=init, blit=False,repeat=False)\r\n    Writer = animation.writers['ffmpeg']\r\n    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\r\n    ani.save('allright_2.mp4', writer=writer)\r\n\r\n    plt.show()\r\n"
] | [
[
"numpy.append",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.abs",
"numpy.clip",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks"
]
] |
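`Particle.collision` applies an impulse of magnitude 2*cor*m1*m2/(m1+m2) times the approach speed along the line of centers, which is the standard elastic exchange when cor = 1. A quick standalone check (not from the repo) on an equal-mass head-on collision, where the velocities should simply swap:

```python
import numpy as np

def collide(x1, v1, m1, x2, v2, m2, cor=1.0):
    r = x1 - x2
    r_hat = r / np.linalg.norm(r)
    # impulse along the line of centers, same formula as Particle.collision
    q = -cor * 2 * m1 * m2 / (m1 + m2) * np.dot(v1 - v2, r_hat) * r_hat
    return v1 + q / m1, v2 - q / m2

v1, v2 = collide(np.array([0.0, 0.0]), np.array([1.0, 0.0]), 1.0,
                 np.array([1.0, 0.0]), np.array([-1.0, 0.0]), 1.0)
print(v1, v2)  # velocities swap: [-1. 0.] [1. 0.]
```

Both momentum and kinetic energy are conserved in this check, consistent with the flat energy histogram the animation is meant to show for cor = 1.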
JiazeWang/reagent | [
"b92c18a339e009504ca51ba5101f8c171b88721e"
] | [
"registration/train_pn_2D_action_divide.py"
] | [
"import numpy as np\nnp.random.seed(42)\nimport torch\ntorch.manual_seed(42)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\ntorch.set_default_dtype(torch.float32)\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport os\nfrom tqdm import tqdm\nfrom prefetch_generator import BackgroundGenerator\nimport argparse\n\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)).replace(\"/registration\", \"\"))\nfrom environment import environment as env\nfrom environment import transformations as tra\nfrom environment.buffer import Buffer\nfrom registration.model_pn_2D_action_divide import Agent\nimport registration.model_pn_2D_action_divide as util_model\nimport utility.metrics as metrics\nfrom utility.logger import Logger\nfrom dataset.dataset import DatasetModelnet40, DatasetLinemod\nimport config as cfg\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train(agent, logger, dataset, noise_type, epochs, lr, lr_step, alpha, model_path, reward_mode=\"\"):\n optimizer = torch.optim.Adam(agent.parameters(), lr=lr, amsgrad=True)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_step, 0.5)\n\n Dataset = DatasetModelnet40 if dataset == \"m40\" else DatasetLinemod\n train_dataset = Dataset(\"train\", noise_type)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True)\n val_dataset = Dataset(\"val\", noise_type)\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=cfg.BATCH_SIZE, shuffle=False)\n test_dataset = Dataset(\"test\" if dataset == \"m40\" else \"eval\", noise_type)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=cfg.BATCH_SIZE, shuffle=False)\n\n RANDOM_STATE = np.random.get_state() # otherwise loader produces deterministic samples after iter 1\n losses_bc, losses_ppo, train_rewards, final_rewards = [], [], [], []\n episode = 0 # for loss logging (not using epoch)\n best_chamfer = np.infty\n\n buffer = Buffer()\n buffer.start_trajectory()\n for epoch in range(epochs):\n print(f\"Epoch {epoch}\")\n\n # -- train\n agent.train()\n np.random.set_state(RANDOM_STATE)\n\n progress = tqdm(BackgroundGenerator(train_loader), total=len(train_loader))\n for data in progress:\n with torch.no_grad():\n # per sample, generate a full trajectory\n source, target, pose_source, pose_target = env.init(data)\n\n if cfg.DISENTANGLED:\n pose_target = tra.to_disentangled(pose_target, source)\n current_source = source\n if reward_mode == \"goal\":\n reward = env.reward_goal(pose_source, pose_target)\n elif reward_mode == \"step\":\n gt_pcd_source = tra.apply_trafo(current_source, pose_target, disentangled=cfg.DISENTANGLED)\n _, prev_chamfer = env.reward_step(current_source, gt_pcd_source)\n\n # STAGE 1: generate trajectories\n for step in range(cfg.ITER_TRAIN):\n # expert prediction\n expert_action = env.expert(pose_source, pose_target, mode=cfg.EXPERT_MODE)\n\n # student prediction -- stochastic policy\n state_emb, action_logit, state_value, _ = agent(current_source, target)\n\n action = util_model.action_from_logits(action_logit, deterministic=False)\n action_logprob, action_entropy = util_model.action_stats(action_logit, action)\n\n # step environment and get reward\n new_source, pose_source = env.step(source, action, pose_source, cfg.DISENTANGLED)\n if reward_mode == \"goal\":\n reward = env.reward_goal(pose_source, pose_target)\n elif reward_mode == \"step\":\n reward, prev_chamfer = env.reward_step(new_source, gt_pcd_source, 
prev_chamfer)\n else:\n reward = torch.zeros((pose_source.shape[0], 1, 1)).to(DEVICE)\n\n # log trajectory\n buffer.log_step([current_source, target], state_value, reward,\n expert_action,\n action, action_logit, action_logprob)\n\n current_source = new_source\n\n train_rewards.append(reward.view(-1))\n final_rewards.append(reward.view(-1))\n\n if len(buffer) == cfg.NUM_TRAJ:\n # STAGE 2: policy (and value estimator) update using BC (and PPO)\n\n # convert buffer to tensor of samples (also computes return and advantage over trajectories)\n samples = buffer.get_samples()\n ppo_dataset = torch.utils.data.TensorDataset(*samples)\n ppo_loader = torch.utils.data.DataLoader(ppo_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True,\n drop_last=False)\n\n # sample batches from buffer and update\n for batch in ppo_loader:\n sources, targets, \\\n expert_actions, state_values, \\\n actions, action_logits, action_logprobs, \\\n returns, advantages = batch\n\n # -- predict using current policy\n new_state_emb, new_action_logit, new_values, _ = agent(sources, targets)\n new_action_logprob, new_action_entropy = util_model.action_stats(new_action_logit, actions)\n\n # -- clone term\n loss_translation = F.cross_entropy(new_action_logit[0].view(-1, 11, 1, 1, 1),\n expert_actions[:, 0].reshape(-1, 1, 1, 1))\n loss_rotation = F.cross_entropy(new_action_logit[1].view(-1, 11, 1, 1, 1),\n expert_actions[:, 1].reshape(-1, 1, 1, 1))\n clone_loss = (loss_translation + loss_rotation) / 2\n\n if alpha > 0:\n # -- policy term\n # ratio: lp > prev_lp --> probability of selecting that action increased\n ratio = torch.exp(new_action_logprob - action_logprobs).reshape(-1, 6)\n policy_loss = -torch.min(ratio * advantages.repeat(1, 6),\n ratio.clamp(1 - cfg.CLIP_EPS,\n 1 + cfg.CLIP_EPS) * advantages.repeat(1, 6)).mean()\n\n # -- value term\n value_loss = (new_values.view(-1, 1) - returns).pow(2)\n if cfg.CLIP_VALUE:\n values_clipped = state_values + (new_values - state_values)\\\n .clamp(-cfg.CLIP_EPS, cfg.CLIP_EPS)\n losses_v_clipped = (values_clipped.view(-1, 1) - returns).pow(2)\n value_loss = torch.max(value_loss, losses_v_clipped)\n value_loss = value_loss.mean()\n\n # -- entropy term\n entropy_loss = new_action_entropy.mean()\n\n # -- update\n optimizer.zero_grad()\n loss = clone_loss\n losses_bc.append(clone_loss.item())\n if alpha > 0:\n ppo_loss = policy_loss + value_loss * cfg.C_VALUE - entropy_loss * cfg.C_ENTROPY\n loss += ppo_loss * alpha\n losses_ppo.append(ppo_loss.item())\n loss.backward()\n optimizer.step()\n\n # logging\n if alpha > 0:\n logger.record(\"train/ppo\", np.mean(losses_ppo))\n logger.record(\"train/bc\", np.mean(losses_bc))\n logger.record(\"train/reward\", float(torch.cat(train_rewards, dim=0).mean()))\n logger.record(\"train/final_reward\", float(torch.cat(final_rewards, dim=0).mean()))\n logger.dump(step=episode)\n\n # reset\n losses_bc, losses_ppo, train_rewards, final_rewards = [], [], [], []\n buffer.clear()\n episode += 1\n\n buffer.start_trajectory()\n scheduler.step()\n RANDOM_STATE = np.random.get_state() # evaluation sets seeds again -- keep random state of the training stage\n\n # -- test\n if val_loader is not None:\n chamfer_val = evaluate(agent, logger, val_loader, prefix='val')\n if test_loader is not None:\n chamfer_test = evaluate(agent, logger, test_loader)\n\n if chamfer_test <= best_chamfer:\n print(f\"new best: {chamfer_test}\")\n best_chamfer = chamfer_test\n infos = {\n 'epoch': epoch,\n 'optimizer_state_dict': optimizer.state_dict()\n }\n util_model.save(agent, 
f\"{model_path}_qmixv1_e100.zip\", infos)\n #model_epoch_path = os.path.join(code_path, f\"weights/e100_shared_mgpus_pn_2d_{dataset}_{mode}_{str(epoch)}\")\n #util_model.save(agent, f\"{model_epoch_path}.zip\", infos)\n logger.dump(step=epoch)\n\n\ndef evaluate(agent, logger, loader, prefix='test'):\n agent.eval()\n progress = tqdm(BackgroundGenerator(loader), total=len(loader))\n predictions = []\n val_losses = []\n with torch.no_grad():\n for data in progress:\n source, target, pose_source, pose_target = env.init(data)\n if cfg.DISENTANGLED:\n pose_target = tra.to_disentangled(pose_target, source)\n\n current_source = source\n for step in range(cfg.ITER_EVAL):\n expert_action = env.expert(pose_source, pose_target, mode=cfg.EXPERT_MODE)\n\n state_emb, action_logit, _, _ = agent(current_source, target)\n action = util_model.action_from_logits(action_logit, deterministic=True)\n\n loss_translation = F.cross_entropy(action_logit[0].view(-1, 11, 1, 1, 1),\n expert_action[:, 0].reshape(-1, 1, 1, 1))\n loss_rotation = F.cross_entropy(action_logit[1].view(-1, 11, 1, 1, 1),\n expert_action[:, 1].reshape(-1, 1, 1, 1))\n val_losses.append((loss_translation + loss_rotation).item()/2)\n\n current_source, pose_source = env.step(source, action, pose_source, cfg.DISENTANGLED)\n if cfg.DISENTANGLED:\n pose_source = tra.to_global(pose_source, source)\n predictions.append(pose_source)\n\n predictions = torch.cat(predictions)\n _, summary_metrics = metrics.compute_stats(predictions, data_loader=loader)\n\n # log test metrics\n if isinstance(loader.dataset, DatasetLinemod):\n logger.record(f\"{prefix}/add\", summary_metrics['add'])\n logger.record(f\"{prefix}/adi\", summary_metrics['adi'])\n return summary_metrics['add']\n else:\n logger.record(f\"{prefix}/mae-r\", summary_metrics['r_mae'])\n logger.record(f\"{prefix}/mae-t\", summary_metrics['t_mae'])\n logger.record(f\"{prefix}/iso-r\", summary_metrics['r_iso'])\n logger.record(f\"{prefix}/iso-t\", summary_metrics['t_iso'])\n logger.record(f\"{prefix}/chamfer\", summary_metrics['chamfer_dist'])\n logger.record(f\"{prefix}/adi-auc\", summary_metrics['adi_auc10'] * 100)\n return summary_metrics['chamfer_dist']\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='ReAgent - training on ModelNet40 and LINEMOD')\n parser.add_argument('--mode', type=str, default='il', choices=['pretrain', 'il', 'ilrl'],\n help='pretraining (pretrain), IL-only (il), IL+RL with a step-wise reward (ilrls).')\n parser.add_argument('--dataset', type=str, default='m40', choices=['m40', 'lm'],\n help='Dataset used for training. All experiments on ModelNet40 and ScanObjectNN use the same '\n 'weights - train both with \"m40\". 
Experiments on LINEMOD (\"lm\") use no pretraining.')\n args = parser.parse_args()\n\n # PATHS\n dataset = args.dataset\n mode = args.mode\n code_path = os.path.dirname(os.path.abspath(__file__)).replace(\"/registration\", \"\")\n if not os.path.exists(os.path.join(code_path, \"logs\")):\n os.mkdir(os.path.join(code_path, \"logs\"))\n if not os.path.exists(os.path.join(code_path, \"weights\")):\n\n\n os.mkdir(os.path.join(code_path, \"weights\"))\n model_path = os.path.join(code_path, f\"weights/{dataset}_{mode}\")\n logger = Logger(log_dir=os.path.join(code_path, f\"logs/{dataset}/\"), log_name=f\"qmixv1_E100_{mode}\",\n reset_num_timesteps=True)\n if torch.cuda.device_count() > 1:\n print(\"Using multiple GPUs\")\n else:\n print(\"Using single GPU\")\n # TRAINING\n agent = nn.DataParallel(Agent()).to(DEVICE)\n\n if args.mode == \"pretrain\" and dataset == \"m40\":\n print(f\"Training: dataset '{dataset}' - mode '{args.mode}'\")\n train(agent, logger, dataset, noise_type=\"clean\", epochs=100, lr=1e-3, lr_step=20, alpha=0,\n model_path=model_path)\n else:\n if args.mode == \"il\":\n alpha = 0.0\n reward_mode = \"\"\n elif args.mode == \"ilrl\":\n alpha = 2.0 if dataset == \"m40\" else 0.1 # reduced influence on lm\n reward_mode = \"step\"\n else:\n raise ValueError(\"No pretraining on LINEMOD. Use 'il' or 'ilrl' instead.\")\n print(f\"Training: dataset '{dataset}' - mode '{args.mode}'{f' - alpha={alpha}' if args.mode != 'il' else ''}\")\n\n if dataset == \"m40\":\n print(\" loading pretrained weights...\")\n if os.path.exists(os.path.join(code_path, f\"weights/m40_pretrain_qmixv1_e100.zip\")):\n util_model.load(agent, os.path.join(code_path, f\"weights/m40_pretrain_qmixv1_e100.zip\"))\n else:\n raise FileNotFoundError(f\"No pretrained weights found at \"\n f\"{os.path.join(code_path, f'weights/m40_pretrain.zip')}. Run with \"\n f\"'pretrain' first or download the provided weights.\")\n\n noise_type = \"jitter\" if dataset == \"m40\" else \"segmentation\"\n epochs = 100 if dataset == \"m40\" else 200\n lr = 1e-4 if dataset == \"m40\" else 1e-3\n lr_step = 20 if dataset == \"m40\" else 40\n\n train(agent, logger, dataset, noise_type, epochs=epochs, lr=lr, lr_step=lr_step,\n alpha=alpha, reward_mode=reward_mode, model_path=model_path)\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.optim.lr_scheduler.StepLR",
"torch.max",
"numpy.random.seed",
"torch.no_grad",
"numpy.mean",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.set_default_dtype",
"numpy.random.get_state",
"torch.exp",
"numpy.random.set_state"
]
] |
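When `alpha > 0`, the training loop above adds PPO's clipped surrogate objective on top of the behavior-cloning loss, via `torch.min(ratio * adv, ratio.clamp(1 - CLIP_EPS, 1 + CLIP_EPS) * adv)`. A minimal sketch of that policy term on dummy tensors (the clip value 0.2 is illustrative, standing in for `cfg.CLIP_EPS`):

```python
import torch

def ppo_policy_loss(new_logprob, old_logprob, advantages, clip_eps=0.2):
    ratio = torch.exp(new_logprob - old_logprob)           # pi_new / pi_old
    unclipped = ratio * advantages
    clipped = ratio.clamp(1 - clip_eps, 1 + clip_eps) * advantages
    return -torch.min(unclipped, clipped).mean()           # pessimistic lower bound

adv = torch.tensor([1.0, -0.5])
print(ppo_policy_loss(torch.log(torch.tensor([0.9, 0.4])),
                      torch.log(torch.tensor([0.5, 0.5])), adv))
```

Taking the elementwise minimum keeps the update conservative: a ratio that has already moved outside the clip range contributes no further gradient in the direction that increased it.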
quanmario0311/medTrans | [
"18bd98789c10dfcff13f03f7efe535e6a77d0a86"
] | [
"lightning_med/models/xcit.py"
] | [
"\"\"\" Cross-Covariance Image Transformer (XCiT) in PyTorch\n\nMostly from timm, with some minor changes\n - https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xcit.py\n\nSame as the official implementation, with some minor adaptations.\n - https://github.com/facebookresearch/xcit/blob/master/xcit.py\n\nPaper:\n - https://arxiv.org/abs/2106.09681\n\"\"\"\n# Copyright (c) 2015-present, Facebook, Inc.\n# All rights reserved.\n\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.models.helpers import build_model_with_cfg\nfrom timm.models.vision_transformer import _cfg, Mlp\nfrom timm.models.registry import register_model\nfrom timm.models.layers import DropPath, trunc_normal_, to_2tuple\nfrom timm.models.cait import ClassAttn\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # Patch size 16\n 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), \n 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'),\n 'xcit_nano_12_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'),\n 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'),\n 'xcit_tiny_12_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'),\n 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'),\n 'xcit_tiny_24_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_small_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'),\n 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'),\n 'xcit_small_12_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'),\n 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'),\n 'xcit_small_24_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'),\n 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'),\n 'xcit_medium_24_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'),\n 'xcit_large_24_p16_224_dist': 
_cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'),\n 'xcit_large_24_p16_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)),\n\n # Patch size 8\n 'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), \n 'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'),\n 'xcit_nano_12_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'),\n 'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'),\n 'xcit_tiny_12_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'),\n 'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'),\n 'xcit_tiny_24_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'),\n 'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'),\n 'xcit_small_12_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'),\n 'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'),\n 'xcit_small_24_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'),\n 'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'),\n 'xcit_medium_24_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)),\n 'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'),\n 'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'),\n 'xcit_large_24_p8_384_dist': _cfg(\n url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)),\n}\n\n\nclass PositionalEncodingFourier(nn.Module):\n \"\"\"\n Positional encoding relying on a fourier kernel matching the one used in the \"Attention is all of Need\" paper.\n Based on the official XCiT code\n - https://github.com/facebookresearch/xcit/blob/master/xcit.py\n \"\"\"\n\n def __init__(self, hidden_dim=32, dim=768, temperature=10000):\n super().__init__()\n self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)\n self.scale = 2 * math.pi\n self.temperature = temperature\n self.hidden_dim = hidden_dim\n self.dim = dim\n self.eps = 1e-6\n\n def forward(self, B: int, H: int, W: int):\n device = self.token_projection.weight.device\n y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)\n x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1)\n y_embed = y_embed / (y_embed[:, 
-1:, :] + self.eps) * self.scale\n        x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale\n        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)\n        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)\n        pos_x = x_embed[:, :, :, None] / dim_t\n        pos_y = y_embed[:, :, :, None] / dim_t\n        pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)\n        pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n        pos = self.token_projection(pos)\n        return pos.repeat(B, 1, 1, 1)  # (B, C, H, W)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n    \"\"\"3x3 convolution + batch norm\"\"\"\n    return torch.nn.Sequential(\n        nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),\n        nn.BatchNorm2d(out_planes)\n    )\n\n\nclass ConvPatchEmbed(nn.Module):\n    \"\"\"Image to Patch Embedding using multiple convolutional layers\"\"\"\n\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):\n        super().__init__()\n        img_size = to_2tuple(img_size)\n        num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)\n        self.img_size = img_size\n        self.patch_size = patch_size\n        self.num_patches = num_patches\n\n        if patch_size == 16:\n            self.proj = torch.nn.Sequential(\n                conv3x3(in_chans, embed_dim // 8, 2),\n                act_layer(),\n                conv3x3(embed_dim // 8, embed_dim // 4, 2),\n                act_layer(),\n                conv3x3(embed_dim // 4, embed_dim // 2, 2),\n                act_layer(),\n                conv3x3(embed_dim // 2, embed_dim, 2),\n            )\n        elif patch_size == 8:\n            self.proj = torch.nn.Sequential(\n                conv3x3(in_chans, embed_dim // 4, 2),\n                act_layer(),\n                conv3x3(embed_dim // 4, embed_dim // 2, 2),\n                act_layer(),\n                conv3x3(embed_dim // 2, embed_dim, 2),\n            )\n        else:\n            raise ValueError('For convolutional projection, patch size has to be in [8, 16]')\n\n    def forward(self, x):\n        x = self.proj(x)\n        Hp, Wp = x.shape[2], x.shape[3]\n        x = x.flatten(2).transpose(1, 2)  # (B, N, C)\n        return x, (Hp, Wp)\n\n\nclass LPI(nn.Module):\n    \"\"\"\n    Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the\n    implicit communication performed by the block diagonal scatter attention. 
Implemented using 2 layers of separable\n 3x3 convolutions with GeLU and BatchNorm2d\n \"\"\"\n\n def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):\n super().__init__()\n out_features = out_features or in_features\n\n padding = kernel_size // 2\n\n self.conv1 = torch.nn.Conv2d(\n in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)\n self.act = act_layer()\n self.bn = nn.BatchNorm2d(in_features)\n self.conv2 = torch.nn.Conv2d(\n in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)\n\n def forward(self, x, H: int, W: int):\n B, N, C = x.shape\n x = x.permute(0, 2, 1).reshape(B, C, H, W)\n x = self.conv1(x)\n x = self.act(x)\n x = self.bn(x)\n x = self.conv2(x)\n x = x.reshape(B, C, N).permute(0, 2, 1)\n return x\n\n\nclass ClassAttentionBlock(nn.Module):\n \"\"\"Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239\"\"\"\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False):\n super().__init__()\n self.norm1 = norm_layer(dim)\n\n self.attn = ClassAttn(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)\n\n if eta is not None: # LayerScale Initialization (no layerscale when None)\n self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)\n self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)\n else:\n self.gamma1, self.gamma2 = 1.0, 1.0\n\n # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721\n self.tokens_norm = tokens_norm\n\n def forward(self, x):\n x_norm1 = self.norm1(x)\n x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1)\n x = x + self.drop_path(self.gamma1 * x_attn)\n if self.tokens_norm:\n x = self.norm2(x)\n else:\n x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1)\n x_res = x\n cls_token = x[:, 0:1]\n cls_token = self.gamma2 * self.mlp(cls_token)\n x = torch.cat([cls_token, x[:, 1:]], dim=1)\n x = x_res + self.drop_path(x)\n return x\n\n\nclass XCA(nn.Module):\n \"\"\" Cross-Covariance Attention (XCA)\n Operation where the channels are updated using a weighted sum. 
The weights are obtained from the (softmax\n normalized) Cross-covariance matrix (Q^T \\\\cdot K \\\\in d_h \\\\times d_h)\n \"\"\"\n\n def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N)\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1)\n q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)\n \n # Paper section 3.2 l2-Normalization and temperature scaling\n q = torch.nn.functional.normalize(q, dim=-1)\n k = torch.nn.functional.normalize(k, dim=-1)\n attn = (q @ k.transpose(-2, -1)) * self.temperature\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n # (B, H, C', N), permute -> (B, N, H, C')\n x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'temperature'}\n\n\nclass XCABlock(nn.Module):\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n\n self.norm3 = norm_layer(dim)\n self.local_mp = LPI(in_features=dim, act_layer=act_layer)\n\n self.norm2 = norm_layer(dim)\n self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)\n\n self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)\n self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)\n self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)\n\n def forward(self, x, H: int, W: int):\n x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))\n # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights\n # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721\n x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))\n x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))\n return x\n\n\nclass XCiT(nn.Module):\n \"\"\"\n Based on timm and DeiT code bases\n https://github.com/rwightman/pytorch-image-models/tree/master/timm\n https://github.com/facebookresearch/deit/\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,\n act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n drop_rate 
(float): dropout rate after positional embedding, and in XCA/CA projection + MLP\n            attn_drop_rate (float): attention dropout rate\n            drop_path_rate (float): stochastic depth rate (constant across all layers)\n            norm_layer: (nn.Module): normalization layer\n            cls_attn_layers: (int) Depth of Class attention layers\n            use_pos_embed: (bool) whether to use positional encoding\n            eta: (float) layerscale initialization value\n            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA\n\n        Notes:\n            - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch\n              interaction (class LPI) and the patch embedding (class ConvPatchEmbed)\n        \"\"\"\n        super().__init__()\n        img_size = to_2tuple(img_size)\n        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \\\n            '`patch_size` should divide image dimensions evenly'\n\n        self.num_classes = num_classes\n        self.num_features = self.embed_dim = embed_dim\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n        act_layer = act_layer or nn.GELU\n\n        self.patch_embed = ConvPatchEmbed(\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)\n\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n        self.use_pos_embed = use_pos_embed\n        if use_pos_embed:\n            self.pos_embed = PositionalEncodingFourier(dim=embed_dim)\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        self.blocks = nn.ModuleList([\n            XCABlock(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta)\n            for _ in range(depth)])\n\n        self.cls_attn_blocks = nn.ModuleList([\n            ClassAttentionBlock(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)\n            for _ in range(cls_attn_layers)])\n\n        # Classifier head\n        self.norm = norm_layer(embed_dim)\n        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n        # Init weights\n        trunc_normal_(self.cls_token, std=.02)\n        self.apply(self._init_weights)\n\n    def _init_weights(self, m):\n        if isinstance(m, nn.Linear):\n            trunc_normal_(m.weight, std=.02)\n            if isinstance(m, nn.Linear) and m.bias is not None:\n                nn.init.constant_(m.bias, 0)\n        elif isinstance(m, nn.LayerNorm):\n            nn.init.constant_(m.bias, 0)\n            nn.init.constant_(m.weight, 1.0)\n\n    @torch.jit.ignore\n    def no_weight_decay(self):\n        return {'pos_embed', 'cls_token'}\n\n    def get_classifier(self):\n        return self.head\n\n    def reset_classifier(self, num_classes, global_pool=''):\n        self.num_classes = num_classes\n        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n    def forward_features(self, x):\n        B = x.shape[0]\n        # x is (B, N, C). 
(Hp, Hw) is (height in units of patches, width in units of patches)\n x, (Hp, Wp) = self.patch_embed(x)\n\n if self.use_pos_embed:\n # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)\n pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)\n x = x + pos_encoding\n\n x = self.pos_drop(x)\n\n for blk in self.blocks:\n x = blk(x, Hp, Wp)\n\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n\n for blk in self.cls_attn_blocks:\n x = blk(x)\n\n x = self.norm(x)[:, 0]\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef checkpoint_filter_fn(state_dict, model):\n if 'model' in state_dict:\n state_dict = state_dict['model']\n # For consistency with timm's transformer models while being compatible with official weights source we rename\n # pos_embeder to pos_embed. Also account for use_pos_embed == False\n use_pos_embed = getattr(model, 'pos_embed', None) is not None\n pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]\n for k in pos_embed_keys:\n if use_pos_embed:\n state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)\n else:\n del state_dict[k]\n # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors\n # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v\n if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():\n num_ca_blocks = len(model.cls_attn_blocks)\n for i in range(num_ca_blocks):\n qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')\n qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])\n for j, subscript in enumerate('qkv'):\n state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]\n qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)\n if qkv_bias is not None:\n qkv_bias = qkv_bias.reshape(3, -1)\n for j, subscript in enumerate('qkv'):\n state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]\n return state_dict\n\n\ndef _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs):\n default_cfg = default_cfg or default_cfgs[variant]\n model = build_model_with_cfg(\n XCiT, variant, pretrained, default_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs)\n return model\n\n\n@register_model\ndef xcit_nano_12_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs)\n model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_nano_12_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs)\n model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_nano_12_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs)\n model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, 
**kwargs)\n model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=512, 
depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_large_24_p16_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_large_24_p16_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_large_24_p16_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n# Patch size 8x8 models\n@register_model\ndef xcit_nano_12_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs)\n model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_nano_12_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs)\n model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_nano_12_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs)\n model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_12_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p8_224(pretrained=False, 
**kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_12_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_small_24_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_medium_24_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef 
xcit_large_24_p8_224(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_large_24_p8_224_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef xcit_large_24_p8_384_dist(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)\n model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs)\n return model"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.zeros",
"torch.arange",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.nn.Conv2d",
"torch.div"
]
] |
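The xcit variants above are all declared through the same @register_model / _create_xcit boilerplate. The underlying mechanism is a decorator that records each factory function in a dict keyed by its name; below is a minimal sketch of that pattern (an illustration only, not timm's actual registry, which additionally tracks module namespaces, pretrained configs, and name filtering):

from typing import Callable, Dict

_model_registry: Dict[str, Callable] = {}

def register_model(fn: Callable) -> Callable:
    # record the factory under its function name, then return it unchanged
    _model_registry[fn.__name__] = fn
    return fn

@register_model
def tiny_model(pretrained=False, **kwargs):
    # stand-in factory; a real one would build and return an nn.Module
    return {"name": "tiny_model", "pretrained": pretrained, **kwargs}

def create_model(name: str, **kwargs):
    return _model_registry[name](**kwargs)

print(create_model("tiny_model", patch_size=16))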
a-paxton/oss-community-health | [
"93ff4d266b5390b53d8ed59f71616de68bcfdda7"
] | [
"scripts/survivor_analysis/utils/project_features.py"
] | [
"import numpy as np\n\n\ndef compute_bus_factor(commits, n_committers=5):\n \"\"\"\n Compute bus factor\n\n Parameters\n ----------\n commits : pd.DataFrame\n Data Frame containing the commit information.\n\n n_committers : int, optional, default: 5\n Number of committers to consider in the bus factor\n\n Returns\n -------\n bus_factor: Lower is better\n \"\"\"\n\n commits[\"author_name\"] = commits[\"author_name\"].replace(\n np.nan, \"\", regex=True)\n _, commits_counts = np.unique(commits[\"author_name\"], return_counts=True)\n commits_counts.sort()\n commits_counts = commits_counts[::-1].astype(float)\n commits_counts /= commits_counts.max()\n bus_factor = np.mean(1 - commits_counts[:n_committers])\n return bus_factor\n"
] | [
[
"numpy.mean",
"numpy.unique"
]
] |
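A quick usage sketch for compute_bus_factor above, on hypothetical toy data (the DataFrames, the import path, and the expected values are illustrative, not from the source repo):

import pandas as pd

from utils.project_features import compute_bus_factor  # hypothetical import path

# one dominant committer: normalized top counts are skewed, metric is high (bad)
concentrated = pd.DataFrame({"author_name": ["alice"] * 8 + ["bob", "carol"]})
print(compute_bus_factor(concentrated))  # ~0.583

# evenly spread commits: metric is 0 (good)
balanced = pd.DataFrame({"author_name": list("abcde") * 2})
print(compute_bus_factor(balanced))  # 0.0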
kaitodeesu/project2021 | [
"1b6a850f7c7aaced7173e424c0eca21e8349f071"
] | [
"Untitled2.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport numpy as np\nfrom numpy import linalg as LA\n\n\ndimension=2 #次元を指定する\n\ndef randomnumber(dimension): #ランダムな行列の生成\n return np.random.random((dimension,dimension))\n\n\ndef gram(a,b): #規格化\n return ((np.dot(a,b)/np.dot(a,a))*a)\n\n\ndef hermatite(a): #複素共役\n return np.conjugate(a.T)\n\nv=randomnumber(dimension)\ne=np.zeros((dimension,dimension),dtype='float64')#エルミット演算子を生成する単位ベクトル\n\nu=np.zeros((dimension,dimension),dtype='float64')#規格化するためのベクトル\nu[0]=v[0]\n\nx=0\nsum=np.array([0,0],dtype='float64')\n\nfor a in range(1,dimension):\n for b in range(0,a):\n sum+=gram(u[b],v[a])\n u[a]=v[a]-sum\n \n\nfor c in range(0,dimension):\n e[c]=u[c]/LA.norm(u[c],2)#·ord=2\n \nprint(e)\n\n\n# In[8]:\n\n\nfor c in range(0,dimension):\n e[c]=u[c]/LA.norm(u[c],2)#·ord=2\n \nprint(e)\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.zeros",
"numpy.conjugate",
"numpy.random.random"
]
] |
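As a sanity check on the Gram-Schmidt snippet above: after orthogonalization and normalization, the rows of e should be orthonormal, i.e. e @ e.T should be the identity. A self-contained restatement of the same procedure with that check (the seed and dimension below are arbitrary choices, not from the source):

import numpy as np

rng = np.random.default_rng(0)
dimension = 4
v = rng.random((dimension, dimension))

u = np.zeros_like(v)
u[0] = v[0]
for a in range(1, dimension):
    # subtract the projections of v[a] onto all previously built u vectors
    proj = sum((u[b] @ v[a]) / (u[b] @ u[b]) * u[b] for b in range(a))
    u[a] = v[a] - proj

e = u / np.linalg.norm(u, axis=1, keepdims=True)
assert np.allclose(e @ e.T, np.eye(dimension))  # rows are orthonormal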
utcsilab/deep-jsense | [
"9e50b96adb944baeea3e365b4ce59627e310107d"
] | [
"models.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 16:17:34 2020\n\n@author: yanni\n\"\"\"\n\nimport torch\nimport sigpy as sp\nimport numpy as np\nimport copy as copy\n\nfrom core_ops import TorchHybridSense, TorchHybridImage\nfrom core_ops import TorchMoDLSense, TorchMoDLImage\nfrom utils import fft, ifft\n\nfrom opt import ZConjGrad\nfrom resnet import ResNet\n\n# Unrolled J-Sense in MoDL style\nclass MoDLDoubleUnroll(torch.nn.Module):\n def __init__(self, hparams):\n super(MoDLDoubleUnroll, self).__init__()\n # Storage\n self.verbose = hparams.verbose\n self.batch_size = hparams.batch_size\n self.block1_max_iter = hparams.block1_max_iter\n self.block2_max_iter = hparams.block2_max_iter\n self.cg_eps = hparams.cg_eps\n\n # Modes\n self.mode = hparams.mode\n self.use_img_net = hparams.use_img_net\n self.use_map_net = hparams.use_map_net\n # Map modes\n self.map_mode = hparams.map_mode\n self.map_norm = hparams.map_norm\n # Initial variables\n self.map_init = hparams.map_init\n self.img_init = hparams.img_init\n # Logging\n self.logging = hparams.logging\n \n # ImageNet parameters\n self.img_channels = hparams.img_channels\n self.img_blocks = hparams.img_blocks\n self.img_sep = hparams.img_sep\n # Attention parameters\n self.att_config = hparams.att_config\n \n # Size parameters\n self.mps_kernel_shape = hparams.mps_kernel_shape # B x C x h x w\n # Get useful values\n self.num_coils = self.mps_kernel_shape[-3]\n self.ones_mask = torch.ones((1)).cuda()\n \n # Initialize trainable parameters\n if hparams.l2lam_train:\n self.block1_l2lam = torch.nn.Parameter(torch.tensor(\n hparams.l2lam_init * \n np.ones((1))).cuda())\n self.block2_l2lam = torch.nn.Parameter(torch.tensor(\n hparams.l2lam_init * \n np.ones((1))).cuda())\n else:\n self.block1_l2lam = torch.tensor(\n hparams.l2lam_init * \n np.ones((1))).cuda()\n self.block2_l2lam = torch.tensor(\n hparams.l2lam_init * \n np.ones((1))).cuda()\n \n # Initialize image module\n if hparams.use_img_net:\n # Initialize ResNet module\n if self.img_sep: # Do we use separate networks at each unroll?\n self.image_net = torch.nn.ModuleList(\n hparams.meta_unrolls_end * [\n ResNet(in_channels=2, \n latent_channels=self.img_channels,\n num_blocks=self.img_blocks,\n kernel_size=3, batch_norm=False)])\n else:\n self.image_net = ResNet(in_channels=2, \n latent_channels=self.img_channels,\n num_blocks=self.img_blocks,\n kernel_size=3, batch_norm=False)\n else:\n # Bypass\n self.image_net = torch.nn.Identity()\n \n # Initialize map module\n if hparams.use_map_net:\n # Intialize ResNet module\n self.maps_net = ResNet(in_channels=2, \n latent_channels=self.img_channels,\n num_blocks=self.img_blocks,\n kernel_size=3, batch_norm=False)\n else:\n # Bypass\n self.maps_net = torch.nn.Identity()\n \n # Initial 'fixed' maps\n # See in 'forward' for the exact initialization depending on mode\n self.init_maps_kernel = 0. 
* torch.randn((self.batch_size,) + \n tuple(self.mps_kernel_shape) + (2,)).cuda()\n self.init_maps_kernel = torch.view_as_complex(self.init_maps_kernel)\n \n # Get torch operators for the entire batch\n def get_core_torch_ops(self, mps_kernel, img_kernel, mask, direction):\n # List of output ops\n normal_ops, adjoint_ops, forward_ops = [], [], []\n \n # For each sample in batch\n for idx in range(self.batch_size):\n if self.mode == 'DeepJSense':\n # Type\n if direction == 'ConvSense':\n forward_op, adjoint_op, normal_op = \\\n TorchHybridSense(self.img_kernel_shape,\n mps_kernel[idx], mask[idx],\n self.img_conv_shape,\n self.ksp_padding, self.maps_padding)\n elif direction == 'ConvImage':\n forward_op, adjoint_op, normal_op = \\\n TorchHybridImage(self.mps_kernel_shape,\n img_kernel[idx], mask[idx],\n self.img_conv_shape,\n self.ksp_padding, self.maps_padding)\n elif self.mode == 'MoDL':\n # Type\n if direction == 'ConvSense':\n forward_op, adjoint_op, normal_op = \\\n TorchMoDLSense(mps_kernel[idx], mask[idx])\n elif direction == 'ConvImage':\n forward_op, adjoint_op, normal_op = \\\n TorchMoDLImage(img_kernel[idx], mask[idx])\n \n # Add to lists\n normal_ops.append(normal_op)\n adjoint_ops.append(adjoint_op)\n forward_ops.append(forward_op)\n \n # Return operators\n return normal_ops, adjoint_ops, forward_ops\n \n # Given a batch of inputs and ops, get a single batch operator\n def get_batch_op(self, input_ops, batch_size):\n # Inner function trick\n def core_function(x):\n # Store in list\n output_list = []\n for idx in range(batch_size):\n output_list.append(input_ops[idx](x[idx])[None, ...])\n # Stack and return\n return torch.cat(output_list, dim=0)\n return core_function\n \n def forward(self, data, meta_unrolls=1):\n # Use the full accelerated k-space\n ksp = data['ksp']\n mask = data['mask'] # 2D mask (or whatever, easy to adjust)\n \n # Get image kernel shape - dynamic and includes padding\n if self.mode == 'DeepJSense':\n self.img_kernel_shape = [ksp.shape[-3]+self.mps_kernel_shape[-2]-1,\n ksp.shape[-2]+self.mps_kernel_shape[-1]-1] # H x W\n self.img_conv_shape = [self.num_coils, \n self.img_kernel_shape[-2]-self.mps_kernel_shape[-2]+1,\n self.img_kernel_shape[-1]-self.mps_kernel_shape[-1]+1] # After convoluting with map kernel\n \n # Compute all required padding parameters\n self.padding = (self.img_conv_shape[-2] - 1,\n self.img_conv_shape[-1] - 1) # Outputs a small kernel\n \n # Decide based on the number of k-space lines\n if np.mod(ksp.shape[-2], 2) == 0:\n self.maps_padding = (int(np.ceil(self.padding[-2] / 2)),\n int(np.floor(self.padding[-2] / 2)),\n int(np.ceil(self.padding[-1] / 2)),\n int(np.floor(self.padding[-1] / 2)))\n self.ksp_padding = (int(np.ceil((self.img_kernel_shape[-2] - self.img_conv_shape[-2])/2)),\n int(np.floor((self.img_kernel_shape[-2] - self.img_conv_shape[-2])/2)),\n int(np.ceil((self.img_kernel_shape[-1] - self.img_conv_shape[-1])/2)),\n int(np.floor((self.img_kernel_shape[-1] - self.img_conv_shape[-1])/2)))\n else:\n # !!! 
Input ksp has to be of even shape\n assert False\n \n elif self.mode == 'MoDL': # No padding\n pass # Nothing needed\n \n # Initializers\n with torch.no_grad():\n # View input as complex\n ksp = torch.view_as_complex(ksp)\n \n # Initial maps\n if self.map_init == 'fixed':\n est_maps_kernel = self.init_maps_kernel\n elif self.map_init == 'estimated':\n # From dataloader\n est_maps_kernel = data['init_maps'].type(torch.complex64)\n elif self.map_init == 'espirit':\n # From dataloader\n est_maps_kernel = data['s_maps_cplx']\n \n # Initial image\n if self.img_init == 'fixed':\n est_img_kernel = sp.dirac(self.img_kernel_shape, dtype=np.complex64)[None, ...]\n est_img_kernel = np.repeat(est_img_kernel, self.batch_size, axis=0)\n # Image domain\n est_img_kernel = sp.ifft(est_img_kernel, axes=(-2, -1))\n est_img_kernel = torch.tensor(est_img_kernel, dtype=torch.cfloat).cuda()\n elif self.img_init == 'estimated':\n # Get adjoint map operator\n _, adjoint_ops, _ = \\\n self.get_core_torch_ops(est_maps_kernel, None, \n mask, 'ConvSense')\n adjoint_batch_op = self.get_batch_op(adjoint_ops, self.batch_size)\n # Apply\n est_img_kernel = adjoint_batch_op(ksp).type(torch.complex64)\n \n # Logging outputs\n if self.logging:\n # Kernels after denoiser modules\n mps_kernel_denoised = []\n img_kernel_denoised = []\n # Estimated logs\n mps_logs, img_logs = [], []\n ksp_logs = []\n mps_logs.append(copy.deepcopy(est_maps_kernel))\n img_logs.append(copy.deepcopy(est_img_kernel))\n # Internal logs\n before_maps, after_maps = [], []\n att_logs = []\n \n # For each outer unroll\n for meta_idx in range(meta_unrolls):\n ## !!! Block 1\n if self.block1_max_iter > 0:\n if self.mode == 'MoDL':\n assert False, 'Shouldn''t be here!'\n \n # Get operators for images --> maps using image kernel\n normal_ops, adjoint_ops, forward_ops = \\\n self.get_core_torch_ops(None, est_img_kernel, \n mask, 'ConvImage')\n # Get joint batch operators for adjoint and normal\n normal_batch_op, adjoint_batch_op = \\\n self.get_batch_op(normal_ops, self.batch_size), \\\n self.get_batch_op(adjoint_ops, self.batch_size)\n \n # Compute RHS\n if meta_idx == 0:\n rhs = adjoint_batch_op(ksp)\n else:\n rhs = adjoint_batch_op(ksp) + self.block1_l2lam[0] * est_maps_kernel\n \n # Get unrolled CG op\n cg_op = ZConjGrad(rhs, normal_batch_op,\n l2lam=self.block1_l2lam[0], \n max_iter=self.block1_max_iter,\n eps=self.cg_eps, verbose=self.verbose)\n # Run CG\n est_maps_kernel = cg_op(est_maps_kernel)\n # Log\n if self.logging:\n mps_logs.append(copy.deepcopy(est_maps_kernel))\n \n # Pre-process\n if not self.use_map_net:\n pass\n else:\n # Transform map kernel to image space\n est_maps_kernel = ifft(est_maps_kernel)\n # Convert to real and treat as a set\n est_maps_kernel = torch.view_as_real(est_maps_kernel)\n est_maps_kernel = est_maps_kernel.permute(0, 1, -1, 2, 3)\n # Absorb batch dimension\n est_maps_kernel = est_maps_kernel[0]\n \n # Log right before\n if self.logging:\n before_maps.append(est_maps_kernel.cpu().detach().numpy())\n \n # Apply denoising network\n if not self.use_map_net:\n pass\n else:\n est_maps_kernel = self.maps_net(est_maps_kernel)\n \n # Log right after\n if self.logging:\n after_maps.append(est_maps_kernel.cpu().detach().numpy())\n \n # Post-process\n if not self.use_map_net:\n pass\n else:\n # Inject batch dimension and re-arrange\n est_maps_kernel = est_maps_kernel[None, ...]\n est_maps_kernel = est_maps_kernel.permute(0, 1, 3, 4, 2).contiguous()\n \n # Convert back to frequency domain\n est_maps_kernel = 
torch.view_as_complex(est_maps_kernel)\n est_maps_kernel = fft(est_maps_kernel)\n \n # Log\n if self.logging:\n mps_kernel_denoised.append(copy.deepcopy(est_maps_kernel))\n \n ## !!! Block 2\n # Get operators for maps --> images using map kernel\n normal_ops, adjoint_ops, forward_ops = \\\n self.get_core_torch_ops(est_maps_kernel, None, \n mask, 'ConvSense')\n # Get joint batch operators for adjoint and normal\n normal_batch_op, adjoint_batch_op = \\\n self.get_batch_op(normal_ops, self.batch_size), \\\n self.get_batch_op(adjoint_ops, self.batch_size)\n \n # Compute RHS\n if meta_idx == 0:\n rhs = adjoint_batch_op(ksp)\n else:\n rhs = adjoint_batch_op(ksp) + self.block2_l2lam[0] * est_img_kernel\n \n # Get unrolled CG op\n cg_op = ZConjGrad(rhs, normal_batch_op, \n l2lam=self.block2_l2lam[0], \n max_iter=self.block2_max_iter,\n eps=self.cg_eps, verbose=self.verbose)\n # Run CG\n est_img_kernel = cg_op(est_img_kernel)\n # Log\n if self.logging:\n img_logs.append(est_img_kernel)\n \n # Convert to reals\n est_img_kernel = torch.view_as_real(est_img_kernel)\n \n # Apply image denoising network in image space\n if self.img_sep:\n est_img_kernel = self.image_net[meta_idx](\n est_img_kernel.permute(\n 0, 3, 1, 2)).permute(0, 2, 3, 1).contiguous()\n else:\n est_img_kernel = self.image_net(est_img_kernel.permute(\n 0, 3, 1, 2)).permute(0, 2, 3, 1).contiguous()\n \n # Convert to complex\n est_img_kernel = torch.view_as_complex(est_img_kernel)\n \n # Log\n if self.logging:\n img_kernel_denoised.append(est_img_kernel)\n \n # For all unrolls, construct k-space\n if meta_idx < meta_unrolls - 1:\n _, _, scratch_ops = \\\n self.get_core_torch_ops(est_maps_kernel, None,\n self.ones_mask, 'ConvSense')\n scratch_batch_op = self.get_batch_op(scratch_ops, self.batch_size)\n est_ksp = scratch_batch_op(est_img_kernel)\n # Log\n ksp_logs.append(est_ksp)\n \n # Compute output coils with an unmasked convolution operator\n _, _, forward_ops = \\\n self.get_core_torch_ops(est_maps_kernel, None,\n self.ones_mask, 'ConvSense')\n forward_batch_op = self.get_batch_op(forward_ops, self.batch_size)\n est_ksp = forward_batch_op(est_img_kernel)\n \n if self.logging:\n # Add final ksp to logs\n ksp_logs.append(est_ksp)\n # Glue logs\n mps_logs = torch.cat(mps_logs, dim=0)\n img_logs = torch.cat(img_logs, dim=0)\n if not self.mode == 'MoDL':\n mps_kernel_denoised = torch.cat(mps_kernel_denoised, dim=0)\n img_kernel_denoised = torch.cat(img_kernel_denoised, dim=0)\n \n if self.logging:\n return est_img_kernel, est_maps_kernel, est_ksp, \\\n mps_logs, img_logs, mps_kernel_denoised, img_kernel_denoised, \\\n ksp_logs, before_maps, after_maps, att_logs\n else:\n return est_img_kernel, est_maps_kernel, est_ksp\n"
] | [
[
"torch.nn.Identity",
"torch.cat",
"torch.view_as_real",
"numpy.ceil",
"torch.no_grad",
"numpy.ones",
"torch.view_as_complex",
"torch.ones",
"torch.tensor",
"numpy.repeat",
"numpy.mod",
"numpy.floor"
]
] |
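Both unrolled blocks in the model above call ZConjGrad to solve a regularized normal equation (N + l2lam * I) x = rhs. The core of any such solver is plain conjugate gradient for a Hermitian positive-definite operator; here is a minimal numpy version for the same kind of system (an illustration only, not the repo's batched complex implementation):

import numpy as np

def conjugate_gradient(apply_A, b, x, max_iter=50, eps=1e-12):
    # solve A x = b for a symmetric/Hermitian positive-definite A
    r = b - apply_A(x)
    p = r.copy()
    rs = np.vdot(r, r).real
    for _ in range(max_iter):
        Ap = apply_A(p)
        alpha = rs / np.vdot(p, Ap).real
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = np.vdot(r, r).real
        if rs_new < eps:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

lam = 0.1
M = np.random.rand(8, 8)
A = M.T @ M + lam * np.eye(8)   # SPD, like (normal op + l2lam * I)
b = np.random.rand(8)
x = conjugate_gradient(lambda v: A @ v, b, np.zeros(8))
print(np.allclose(A @ x, b, atol=1e-4))  # True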
0h-n0/first_deep | [
"8b4b1c3e2198774baaddac7b1045fecc95c59f0b"
] | [
"mnist.py"
] | [
"\"\"\"\nOriginal: https://github.com/pytorch/examples/blob/master/mnist/main.py\n\"\"\"\n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torchex.nn as exnn\n\n\nclass FC(nn.Module):\n def __init__(self):\n super(FC, self).__init__()\n self.net = nn.Sequential(\n exnn.Flatten(),\n exnn.Linear(500),\n nn.ReLU(),\n exnn.Linear(10),\n nn.ReLU(), \n nn.Linear(10, 10))\n\n def forward(self, x):\n x = self.net(x)\n return F.log_softmax(x, dim=1)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n \ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n \n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n\n #model = Net().to(device)\n model = FC().to(device) \n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n\n if (args.save_model):\n torch.save(model.state_dict(),\"mnist_cnn.pt\")\n \nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.Linear",
"torch.device",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.functional.nll_loss",
"torch.nn.functional.max_pool2d"
]
] |
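The magic number 4*4*50 in Net.forward above is just shape bookkeeping for 28x28 MNIST inputs: each 5x5 valid convolution shrinks the side by 4 and each 2x2 max pool halves it. A two-line check of that arithmetic:

# 28x28 input -> conv/pool stack -> 4x4x50 feature map
size = 28
size = size - 5 + 1   # conv1, 5x5, stride 1 -> 24
size = size // 2      # 2x2 max pool        -> 12
size = size - 5 + 1   # conv2, 5x5, stride 1 -> 8
size = size // 2      # 2x2 max pool        -> 4
print(size * size * 50)  # 800 features into fc1 (= 4*4*50)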
neuromorphs/l2race | [
"ad5271afdd9d81375e45402bbef4a077d4bd45ac"
] | [
"src/controllers/neural_mpc_controller_util/nn_prediction/generate_data.py"
] | [
"import numpy as np\nimport math\nimport csv\nimport pandas as pd\nfrom racing.car import Car\nfrom racing.track import Track\nfrom mppi_mpc.car_controller import CarController\nfrom constants import *\nimport matplotlib.pyplot as plt\nfrom tqdm import trange\n\n\n\ntrack = Track()\ncar = Car(track)\n\npi = math.pi\n\n\ndef generate_distribution():\n\n nn_inputs = []\n nn_results = []\n\n track = Track()\n car = Car(track)\n number_of_initial_states = 1\n number_of_trajectories = 500\n number_of_steps_per_trajectory = 10\n\n car.state = [0,0,0,0,0,0,0]\n\n #Position => invariant since we are only interrested in delta\n x_dist = np.zeros(number_of_initial_states)\n y_dist = np.zeros(number_of_initial_states)\n\n #Steering of front wheels\n delta_dist = np.random.uniform(-1,1, number_of_initial_states) \n\n #velocity in face direction\n v_dist = np.random.uniform(6, 15, number_of_initial_states) \n\n #Yaw Angle\n yaw_dist = np.random.uniform(-pi, pi, number_of_initial_states)\n\n #Yaw rate\n yaw_rate_dist = np.random.uniform(-1, 1, number_of_initial_states)\n\n #Slip angle\n slip_angle_dist = np.random.uniform(-0.1, 0.1, number_of_initial_states)\n\n\n states = np.column_stack((x_dist, y_dist, delta_dist, v_dist, yaw_dist, yaw_rate_dist, slip_angle_dist))\n\n print(states.shape)\n print(states[0])\n\n for i in trange(len(states)):\n\n state = states[i]\n mu, sigma = 0, 0.4 # mean and standard deviation\n u0_dist = np.random.normal(mu, sigma, number_of_trajectories)\n mu, sigma = 0, 0.5 # mean and standard deviation\n u1_dist = np.random.normal(mu, sigma, number_of_trajectories)\n\n controls = np.column_stack((u0_dist, u1_dist))\n results = []\n\n for j in range(number_of_trajectories):\n car.state = state \n\n for k in range (number_of_steps_per_trajectory):\n control = controls[j]\n\n state_and_control = np.append(car.state,control)\n car.step(control)\n state_and_control_and_future_state = np.append(state_and_control,car.state)\n results.append(state_and_control_and_future_state)\n\n\n car.draw_history(\"test.png\")\n\n with open(\"nn_prediction/training_data.csv\", 'a', encoding='UTF8') as f:\n writer = csv.writer(f)\n time = 0\n for result in results:\n \n time_state_and_control = np.append(time, result)\n\n #time, x1,x2,x3,x4,x5,x6,x7,u1,u2,x1n,x2n,x3n,x4n,x5n,x6n,x7n\n writer.writerow(time_state_and_control)\n time = round(time+0.2, 2)\n\n\n\nif __name__ == \"__main__\":\n\n generate_distribution()\n\n"
] | [
[
"numpy.random.normal",
"numpy.zeros",
"numpy.random.uniform",
"numpy.append",
"numpy.column_stack"
]
] |
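The commented row layout in generate_data.py above (time, x1..x7, u1, u2, x1n..x7n) makes the resulting CSV easy to slice back into (state, control, next state) training pairs. A small reader sketch under that assumption (the path is the one the script writes to, so the file must already exist):

import numpy as np

data = np.loadtxt("nn_prediction/training_data.csv", delimiter=",")
states = data[:, 1:8]         # x1..x7
controls = data[:, 8:10]      # u1, u2
next_states = data[:, 10:17]  # x1n..x7n
print(states.shape, controls.shape, next_states.shape)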
SimranJain13/CarDamageDetection | [
"ba0b8ee6f3445a59723b005799d68e3285cb991d"
] | [
"mrcnn/visualize.py"
] | [
"\"\"\"\r\nMask R-CNN\r\nDisplay and Visualization Functions.\r\n\r\nCopyright (c) 2017 Matterport, Inc.\r\nLicensed under the MIT License (see LICENSE for details)\r\nWritten by Waleed Abdulla\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport random\r\nimport itertools\r\nimport colorsys\r\n\r\nimport numpy as np\r\nfrom skimage.measure import find_contours\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import patches, lines\r\nfrom matplotlib.patches import Polygon\r\nimport IPython.display\r\n\r\n# Root directory of the project\r\nROOT_DIR = os.path.abspath(\"../\")\r\n\r\n# Import Mask RCNN\r\nsys.path.append(ROOT_DIR) # To find local version of the library\r\nfrom mrcnn import utils\r\n\r\n\r\n############################################################\r\n# Visualization\r\n############################################################\r\n\r\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\r\n interpolation=None):\r\n \"\"\"Display the given set of images, optionally with titles.\r\n images: list or array of image tensors in HWC format.\r\n titles: optional. A list of titles to display with each image.\r\n cols: number of images per row\r\n cmap: Optional. Color map to use. For example, \"Blues\".\r\n norm: Optional. A Normalize instance to map values to colors.\r\n interpolation: Optional. Image interpolation to use for display.\r\n \"\"\"\r\n titles = titles if titles is not None else [\"\"] * len(images)\r\n rows = len(images) // cols + 1\r\n plt.figure(figsize=(14, 14 * rows // cols))\r\n i = 1\r\n for image, title in zip(images, titles):\r\n plt.subplot(rows, cols, i)\r\n plt.title(title, fontsize=9)\r\n plt.axis('off')\r\n plt.imshow(image.astype(np.uint8), cmap=cmap,\r\n norm=norm, interpolation=interpolation)\r\n i += 1\r\n plt.show()\r\n\r\n\r\ndef random_colors(N, bright=True):\r\n \"\"\"\r\n Generate random colors.\r\n To get visually distinct colors, generate them in HSV space then\r\n convert to RGB.\r\n \"\"\"\r\n brightness = 1.0 if bright else 0.7\r\n hsv = [(i / N, 1, brightness) for i in range(N)]\r\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\r\n random.shuffle(colors)\r\n return colors\r\n\r\n\r\ndef apply_mask(image, mask, color, alpha=0.5):\r\n \"\"\"Apply the given mask to the image.\r\n \"\"\"\r\n for c in range(3):\r\n image[:, :, c] = np.where(mask == 1,\r\n image[:, :, c] *\r\n (1 - alpha) + alpha * color[c] * 255,\r\n image[:, :, c])\r\n return image\r\n\r\n\r\ndef display_instances(image, boxes, masks, class_ids, class_names,\r\n scores=None, title=\"\",\r\n figsize=(16, 16), ax=None,\r\n show_mask=True, show_bbox=True,\r\n colors=None, captions=None):\r\n \"\"\"\r\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\r\n masks: [height, width, num_instances]\r\n class_ids: [num_instances]\r\n class_names: list of class names of the dataset\r\n scores: (optional) confidence scores for each box\r\n title: (optional) Figure title\r\n show_mask, show_bbox: To show masks and bounding boxes or not\r\n figsize: (optional) the size of the image\r\n colors: (optional) An array or colors to use with each object\r\n captions: (optional) A list of strings to use as captions for each object\r\n \"\"\"\r\n # Number of instances\r\n N = boxes.shape[0]\r\n if not N:\r\n print(\"\\n*** No instances to display *** \\n\")\r\n else:\r\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\r\n\r\n # If no axis is passed, create one and automatically call show()\r\n auto_show = True\r\n if not ax:\r\n 
_, ax = plt.subplots(1, figsize=figsize)\r\n auto_show = True\r\n\r\n # Generate random colors\r\n colors = colors or random_colors(N)\r\n\r\n # Show area outside image boundaries.\r\n height, width = image.shape[:2]\r\n ax.set_ylim(height + 10, -10)\r\n ax.set_xlim(-10, width + 10)\r\n ax.axis('off')\r\n ax.set_title(title)\r\n\r\n masked_image = image.astype(np.uint32).copy()\r\n for i in range(N):\r\n color = colors[i]\r\n\r\n # Bounding box\r\n if not np.any(boxes[i]):\r\n # Skip this instance. Has no bbox. Likely lost in image cropping.\r\n continue\r\n y1, x1, y2, x2 = boxes[i]\r\n if show_bbox:\r\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\r\n alpha=0.7, linestyle=\"dashed\",\r\n edgecolor=color, facecolor='none')\r\n ax.add_patch(p)\r\n\r\n # Label\r\n if not captions:\r\n class_id = class_ids[i]\r\n score = scores[i] if scores is not None else None\r\n label = class_names[class_id]\r\n caption = \"{} {:.3f}\".format(label, score) if score else label\r\n else:\r\n caption = captions[i]\r\n ax.text(x1, y1 + 8, caption,\r\n color='w', size=11, backgroundcolor=\"none\")\r\n\r\n # Mask\r\n mask = masks[:, :, i]\r\n if show_mask:\r\n masked_image = apply_mask(masked_image, mask, color)\r\n\r\n # Mask Polygon\r\n # Pad to ensure proper polygons for masks that touch image edges.\r\n padded_mask = np.zeros(\r\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\r\n padded_mask[1:-1, 1:-1] = mask\r\n contours = find_contours(padded_mask, 0.5)\r\n for verts in contours:\r\n # Subtract the padding and flip (y, x) to (x, y)\r\n verts = np.fliplr(verts) - 1\r\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\r\n ax.add_patch(p)\r\n ax.imshow(masked_image.astype(np.uint8))\r\n if auto_show:\r\n plt.show()\r\n else:\r\n plt.show()\r\n\r\n\r\ndef display_differences(image,\r\n gt_box, gt_class_id, gt_mask,\r\n pred_box, pred_class_id, pred_score, pred_mask,\r\n class_names, title=\"\", ax=None,\r\n show_mask=True, show_box=True,\r\n iou_threshold=0.5, score_threshold=0.5):\r\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\r\n # Match predictions to ground truth\r\n gt_match, pred_match, overlaps = utils.compute_matches(\r\n gt_box, gt_class_id, gt_mask,\r\n pred_box, pred_class_id, pred_score, pred_mask,\r\n iou_threshold=iou_threshold, score_threshold=score_threshold)\r\n # Ground truth = green. 
Predictions = red\r\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\r\n + [(1, 0, 0, 1)] * len(pred_match)\r\n # Concatenate GT and predictions\r\n class_ids = np.concatenate([gt_class_id, pred_class_id])\r\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\r\n boxes = np.concatenate([gt_box, pred_box])\r\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\r\n # Captions per instance show score/IoU\r\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\r\n pred_score[i],\r\n (overlaps[i, int(pred_match[i])]\r\n if pred_match[i] > -1 else overlaps[i].max()))\r\n for i in range(len(pred_match))]\r\n # Set title if not provided\r\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\r\n # Display\r\n display_instances(\r\n image,\r\n boxes, masks, class_ids,\r\n class_names, scores, ax=ax,\r\n show_bbox=show_box, show_mask=show_mask,\r\n colors=colors, captions=captions,\r\n title=title)\r\n\r\n\r\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\r\n \"\"\"\r\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\r\n proposals: [n, 4] the same anchors but refined to fit objects better.\r\n \"\"\"\r\n masked_image = image.copy()\r\n\r\n # Pick random anchors in case there are too many.\r\n ids = np.arange(rois.shape[0], dtype=np.int32)\r\n ids = np.random.choice(\r\n ids, limit, replace=False) if ids.shape[0] > limit else ids\r\n\r\n fig, ax = plt.subplots(1, figsize=(12, 12))\r\n if rois.shape[0] > limit:\r\n plt.title(\"Showing {} random ROIs out of {}\".format(\r\n len(ids), rois.shape[0]))\r\n else:\r\n plt.title(\"{} ROIs\".format(len(ids)))\r\n\r\n # Show area outside image boundaries.\r\n ax.set_ylim(image.shape[0] + 20, -20)\r\n ax.set_xlim(-50, image.shape[1] + 20)\r\n ax.axis('off')\r\n\r\n for i, id in enumerate(ids):\r\n color = np.random.rand(3)\r\n class_id = class_ids[id]\r\n # ROI\r\n y1, x1, y2, x2 = rois[id]\r\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\r\n edgecolor=color if class_id else \"gray\",\r\n facecolor='none', linestyle=\"dashed\")\r\n ax.add_patch(p)\r\n # Refined ROI\r\n if class_id:\r\n ry1, rx1, ry2, rx2 = refined_rois[id]\r\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\r\n edgecolor=color, facecolor='none')\r\n ax.add_patch(p)\r\n # Connect the top-left corners of the anchor and proposal for easy visualization\r\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\r\n\r\n # Label\r\n label = class_names[class_id]\r\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\r\n color='w', size=11, backgroundcolor=\"none\")\r\n\r\n # Mask\r\n m = utils.unmold_mask(mask[id], rois[id]\r\n [:4].astype(np.int32), image.shape)\r\n masked_image = apply_mask(masked_image, m, color)\r\n\r\n ax.imshow(masked_image)\r\n\r\n # Print stats\r\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\r\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\r\n print(\"Positive Ratio: {:.2f}\".format(\r\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\r\n\r\n\r\n# TODO: Replace with matplotlib equivalent?\r\ndef draw_box(image, box, color):\r\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\r\n color: list of 3 int values for RGB.\r\n \"\"\"\r\n y1, x1, y2, x2 = box\r\n image[y1:y1 + 2, x1:x2] = color\r\n image[y2:y2 + 2, x1:x2] = color\r\n image[y1:y2, x1:x1 + 2] = color\r\n image[y1:y2, x2:x2 + 2] = color\r\n return image\r\n\r\n\r\ndef display_top_masks(image, mask, 
class_ids, class_names, limit=4):\r\n \"\"\"Display the given image and the top few class masks.\"\"\"\r\n to_display = []\r\n titles = []\r\n to_display.append(image)\r\n titles.append(\"H x W={}x{}\".format(image.shape[0], image.shape[1]))\r\n # Pick top prominent classes in this image\r\n unique_class_ids = np.unique(class_ids)\r\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\r\n for i in unique_class_ids]\r\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\r\n key=lambda r: r[1], reverse=True) if v[1] > 0]\r\n # Generate images and titles\r\n for i in range(limit):\r\n class_id = top_ids[i] if i < len(top_ids) else -1\r\n # Pull masks of instances belonging to the same class.\r\n m = mask[:, :, np.where(class_ids == class_id)[0]]\r\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\r\n to_display.append(m)\r\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\r\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\r\n\r\n\r\ndef plot_precision_recall(AP, precisions, recalls):\r\n \"\"\"Draw the precision-recall curve.\r\n\r\n AP: Average precision at IoU >= 0.5\r\n precisions: list of precision values\r\n recalls: list of recall values\r\n \"\"\"\r\n # Plot the Precision-Recall curve\r\n _, ax = plt.subplots(1)\r\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\r\n ax.set_ylim(0, 1.1)\r\n ax.set_xlim(0, 1.1)\r\n _ = ax.plot(recalls, precisions)\r\n\r\n\r\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\r\n overlaps, class_names, threshold=0.5):\r\n \"\"\"Draw a grid showing how ground truth objects are classified.\r\n gt_class_ids: [N] int. Ground truth class IDs\r\n pred_class_id: [N] int. Predicted class IDs\r\n pred_scores: [N] float. The probability scores of predicted classes\r\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\r\n class_names: list of all class names in the dataset\r\n threshold: Float. 
The prediction probability required to predict a class\r\n \"\"\"\r\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\r\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\r\n\r\n plt.figure(figsize=(12, 10))\r\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\r\n plt.yticks(np.arange(len(pred_class_ids)),\r\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\r\n for i, id in enumerate(pred_class_ids)])\r\n plt.xticks(np.arange(len(gt_class_ids)),\r\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\r\n\r\n thresh = overlaps.max() / 2.\r\n for i, j in itertools.product(range(overlaps.shape[0]),\r\n range(overlaps.shape[1])):\r\n text = \"\"\r\n if overlaps[i, j] > threshold:\r\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\r\n color = (\"white\" if overlaps[i, j] > thresh\r\n else \"black\" if overlaps[i, j] > 0\r\n else \"grey\")\r\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\r\n horizontalalignment=\"center\", verticalalignment=\"center\",\r\n fontsize=9, color=color)\r\n\r\n plt.tight_layout()\r\n plt.xlabel(\"Ground Truth\")\r\n plt.ylabel(\"Predictions\")\r\n\r\n\r\ndef draw_boxes(image, boxes=None, refined_boxes=None,\r\n masks=None, captions=None, visibilities=None,\r\n title=\"\", ax=None):\r\n \"\"\"Draw bounding boxes and segmentation masks with different\r\n customizations.\r\n\r\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\r\n refined_boxes: Like boxes, but draw with solid lines to show\r\n that they're the result of refining 'boxes'.\r\n masks: [N, height, width]\r\n captions: List of N titles to display on each box\r\n visibilities: (optional) List of values of 0, 1, or 2. Determine how\r\n prominent each bounding box should be.\r\n title: An optional title to show over the image\r\n ax: (optional) Matplotlib axis to draw on.\r\n \"\"\"\r\n # Number of boxes\r\n assert boxes is not None or refined_boxes is not None\r\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\r\n\r\n # Matplotlib Axis\r\n if not ax:\r\n _, ax = plt.subplots(1, figsize=(12, 12))\r\n\r\n # Generate random colors\r\n colors = random_colors(N)\r\n\r\n # Show area outside image boundaries.\r\n margin = image.shape[0] // 10\r\n ax.set_ylim(image.shape[0] + margin, -margin)\r\n ax.set_xlim(-margin, image.shape[1] + margin)\r\n ax.axis('off')\r\n\r\n ax.set_title(title)\r\n\r\n masked_image = image.astype(np.uint32).copy()\r\n for i in range(N):\r\n # Box visibility\r\n visibility = visibilities[i] if visibilities is not None else 1\r\n if visibility == 0:\r\n color = \"gray\"\r\n style = \"dotted\"\r\n alpha = 0.5\r\n elif visibility == 1:\r\n color = colors[i]\r\n style = \"dotted\"\r\n alpha = 1\r\n elif visibility == 2:\r\n color = colors[i]\r\n style = \"solid\"\r\n alpha = 1\r\n\r\n # Boxes\r\n if boxes is not None:\r\n if not np.any(boxes[i]):\r\n # Skip this instance. Has no bbox. 
Likely lost in cropping.\r\n continue\r\n y1, x1, y2, x2 = boxes[i]\r\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\r\n alpha=alpha, linestyle=style,\r\n edgecolor=color, facecolor='none')\r\n ax.add_patch(p)\r\n\r\n # Refined boxes\r\n if refined_boxes is not None and visibility > 0:\r\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\r\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\r\n edgecolor=color, facecolor='none')\r\n ax.add_patch(p)\r\n # Connect the top-left corners of the anchor and proposal\r\n if boxes is not None:\r\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\r\n\r\n # Captions\r\n if captions is not None:\r\n caption = captions[i]\r\n # If there are refined boxes, display captions on them\r\n if refined_boxes is not None:\r\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\r\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\r\n color='w', backgroundcolor=\"none\",\r\n bbox={'facecolor': color, 'alpha': 0.5,\r\n 'pad': 2, 'edgecolor': 'none'})\r\n\r\n # Masks\r\n if masks is not None:\r\n mask = masks[:, :, i]\r\n masked_image = apply_mask(masked_image, mask, color)\r\n # Mask Polygon\r\n # Pad to ensure proper polygons for masks that touch image edges.\r\n padded_mask = np.zeros(\r\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\r\n padded_mask[1:-1, 1:-1] = mask\r\n contours = find_contours(padded_mask, 0.5)\r\n for verts in contours:\r\n # Subtract the padding and flip (y, x) to (x, y)\r\n verts = np.fliplr(verts) - 1\r\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\r\n ax.add_patch(p)\r\n ax.imshow(masked_image.astype(np.uint8))\r\n\r\n\r\ndef display_table(table):\r\n \"\"\"Display values in a table format.\r\n table: an iterable of rows, and each row is an iterable of values.\r\n \"\"\"\r\n html = \"\"\r\n for row in table:\r\n row_html = \"\"\r\n for col in row:\r\n row_html += \"<td>{:40}</td>\".format(str(col))\r\n html += \"<tr>\" + row_html + \"</tr>\"\r\n html = \"<table>\" + html + \"</table>\"\r\n IPython.display.display(IPython.display.HTML(html))\r\n\r\n\r\ndef display_weight_stats(model):\r\n \"\"\"Scans all the weights in the model and returns a list of tuples\r\n that contain stats about each weight.\r\n \"\"\"\r\n layers = model.get_trainable_layers()\r\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\r\n for l in layers:\r\n weight_values = l.get_weights() # list of Numpy arrays\r\n weight_tensors = l.weights # list of TF tensors\r\n for i, w in enumerate(weight_values):\r\n weight_name = weight_tensors[i].name\r\n # Detect problematic layers. Exclude biases of conv layers.\r\n alert = \"\"\r\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\r\n alert += \"<span style='color:red'>*** dead?</span>\"\r\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\r\n alert += \"<span style='color:red'>*** Overflow?</span>\"\r\n # Add row\r\n table.append([\r\n weight_name + alert,\r\n str(w.shape),\r\n \"{:+9.4f}\".format(w.min()),\r\n \"{:+10.4f}\".format(w.max()),\r\n \"{:+9.4f}\".format(w.std()),\r\n ])\r\n display_table(table)\r\n"
] | [
[
"numpy.random.choice",
"numpy.random.rand",
"numpy.where",
"matplotlib.patches.Rectangle",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.lines.Line2D",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.fliplr",
"matplotlib.pyplot.xlabel",
"numpy.any",
"matplotlib.pyplot.ylabel",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
] |
kapseliboi/Datascope | [
"c781bc70bf644365a48f63cb560a9d2fe0ae2e3b"
] | [
"public/data/testimages/generate-images.py"
] | [
"import numpy, Image\n\nfor n in xrange(10000):\n a = numpy.random.rand(64,64,3) * 255\n im_out = Image.fromarray(a.astype('uint8')).convert('RGBA')\n im_out.save('%000d.jpg' % n)"
] | [
[
"numpy.random.rand"
]
] |
zerohd4869/Chinese-NER | [
"53e259690538a54761a16caa41cc78535d61aa04"
] | [
"model/cw_ner/modules/highway.py"
] | [
"\n\"\"\"\nimplements of the highway net\ninclude two gate: transform gate(G_T) and carry gate(G_C)\n H = w_h * x + b_h\n G_T = sigmoid(w_t * x + b_t)\n G_C = sigmoid(w_c * x + b_c)\noutputs:\n outputs = G_T * H + G_C * x\n\nfor sample:\n G_C = (1 - G_T), then:\n outputs = G_T * H + (1 - G_T) * x\n and generally set b_c = -1 or -3, that mean set b_t = 1 or 3\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom model.cw_ner.functions.iniatlize import init_highway\n\n\nclass Highway(nn.Module):\n def __init__(self, input_dim, num_layers=1, activation=nn.functional.relu,\n require_grad=True):\n \"\"\"\n Args:\n input_dim: the dim\n num_layers: the numer of highway layers\n activation: activation function, tanh or relu\n \"\"\"\n super(Highway, self).__init__()\n\n self._input_dim = input_dim\n self._num_layers = num_layers\n\n # output is input_dim * 2, because one is candidate status, and another\n # is transform gate\n self._layers = torch.nn.ModuleList(\n [nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]\n )\n self._activation = activation\n i = 0\n for layer in self._layers:\n layer.weight.requires_grad = require_grad\n layer.bias.requires_grad = require_grad\n init_highway(layer, 100)\n layer.bias[input_dim:].data.fill_(1)\n\n i += 1\n\n def forward(self, inputs):\n \"\"\"\n Args:\n inputs: a tensor, size is [batch_size, n_tokens, input_dim]\n \"\"\"\n current_input = inputs\n for layer in self._layers:\n proj_inputs = layer(current_input)\n linear_part = current_input\n\n del current_input\n\n # here the gate is carry gate, if you change it to transform gate\n # the bias init should change too, maybe -1 or -3 even\n nonlinear_part, carry_gate = proj_inputs.chunk(2, dim=-1)\n nonlinear_part = self._activation(nonlinear_part)\n carry_gate = torch.nn.functional.sigmoid(carry_gate)\n current_input = (1 - carry_gate) * nonlinear_part + carry_gate * linear_part\n\n return current_input\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.sigmoid"
]
] |
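The gating arithmetic inside Highway.forward above can be reproduced in a few lines of plain PyTorch. A standalone sketch of the same (1 - carry) * H + carry * x combination (illustrative; it skips the repo's init_highway weight initialization):

import torch
import torch.nn as nn

input_dim = 8
layer = nn.Linear(input_dim, input_dim * 2)
with torch.no_grad():
    layer.bias[input_dim:].fill_(1.0)  # bias the carry gate open at init

x = torch.randn(2, 5, input_dim)       # [batch, n_tokens, input_dim]
h, carry_logits = layer(x).chunk(2, dim=-1)
carry = torch.sigmoid(carry_logits)
out = (1 - carry) * torch.relu(h) + carry * x
print(out.shape)                       # torch.Size([2, 5, 8])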
mellis13/SmartRedis | [
"b3d0bd07b53138f8b30cbb3291125e33839977d2"
] | [
"tests/python/test_put_get_dataset.py"
] | [
"# BSD 2-Clause License\n#\n# Copyright (c) 2021, Hewlett Packard Enterprise\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport time\n\nimport numpy as np\nfrom smartredis import Client, Dataset\n\n\ndef test_put_get_dataset(mock_data, use_cluster):\n \"\"\"test sending and recieving a dataset with 2D tensors\n of every datatype\n \"\"\"\n\n data = mock_data.create_data((10, 10))\n\n # Create a dataset to put\n dataset = Dataset(\"test-dataset\")\n for index, tensor in enumerate(data):\n key = f\"tensor_{str(index)}\"\n dataset.add_tensor(key, tensor)\n\n client = Client(None, use_cluster)\n\n assert not client.dataset_exists(\n \"nonexistent-dataset\"\n ), \"Existence of nonexistant dataset!\"\n\n client.put_dataset(dataset)\n\n assert client.dataset_exists(\"test-dataset\"), \"Non-existance of real dataset!\"\n\n rdataset = client.get_dataset(\"test-dataset\")\n for index, tensor in enumerate(data):\n key = f\"tensor_{str(index)}\"\n rtensor = rdataset.get_tensor(key)\n np.testing.assert_array_equal(\n rtensor,\n tensor,\n \"Dataset returned from get_dataset not the same as sent dataset\",\n )\n\n\ndef test_augment_dataset(mock_data, use_cluster):\n \"\"\"Test sending, receiving, altering, and sending\n a Dataset.\n \"\"\"\n # Create mock test data\n data = mock_data.create_data((10, 10))\n\n # Set test dataset name\n dataset_name = \"augment-dataset\"\n\n # Initialize a client\n client = Client(None, use_cluster)\n\n # Create a dataset to put into the database\n dataset = Dataset(dataset_name)\n for index, tensor in enumerate(data):\n tensor_name = f\"tensor_{str(index)}\"\n dataset.add_tensor(tensor_name, tensor)\n\n # Send the dataset\n client.put_dataset(dataset)\n\n # Retrieve the dataset\n rdataset = client.get_dataset(dataset_name)\n\n # Add a new tensor to the retrieved dataset\n new_tensor = np.array([1.0, 2.0, 3.0])\n rdataset.add_tensor(\"new_tensor\", new_tensor)\n\n # Add a metadata scalar field to the dataset\n scalar_field = 1.0\n scalar_name = \"scalar_field_1\"\n rdataset.add_meta_scalar(scalar_name, scalar_field)\n\n # Add a metadata string field to the dataset\n string_field = \"test_string\"\n string_name = \"string_field\"\n rdataset.add_meta_string(string_name, string_field)\n\n # Send the augmented dataset\n 
client.put_dataset(rdataset)\n\n # Retrieve the augmented dataset\n aug_dataset = client.get_dataset(dataset_name)\n\n # Check the accuracy of the augmented dataset\n for index, tensor in enumerate(data):\n tensor_name = f\"tensor_{str(index)}\"\n rtensor = aug_dataset.get_tensor(tensor_name)\n np.testing.assert_array_equal(\n rtensor,\n tensor,\n \"Dataset returned from get_dataset not the same as sent dataset\",\n )\n\n rtensor = aug_dataset.get_tensor(\"new_tensor\")\n np.testing.assert_array_equal(\n rtensor,\n new_tensor,\n \"Dataset returned did not return the correct additional tensor\",\n )\n\n # Check the accuracy of the metadata fields\n assert aug_dataset.get_meta_scalars(scalar_name).size == 1\n assert len(aug_dataset.get_meta_strings(string_name)) == 1\n assert aug_dataset.get_meta_scalars(scalar_name)[0] == scalar_field\n assert aug_dataset.get_meta_strings(string_name)[0] == string_field\n"
] | [
[
"numpy.array",
"numpy.testing.assert_array_equal"
]
] |
delldu/Fast-Image-Filters | [
"c238214fa4aaf1b4790b29e7306d6f772e1acf07"
] | [
"project/model_helper.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass AdaptiveBatchNorm2d(nn.Module):\n\n \"\"\"Adaptive batch normalization\"\"\"\n\n def __init__(self, num_feat, eps=1e-5, momentum=0.1, affine=True):\n \"\"\"Adaptive batch normalization\"\"\"\n super().__init__()\n self.bn = nn.BatchNorm2d(num_feat, eps, momentum, affine)\n self.a = nn.Parameter(torch.ones(1, 1, 1, 1))\n self.b = nn.Parameter(torch.zeros(1, 1, 1, 1))\n\n def forward(self, x):\n return self.a * x + self.b * self.bn(x)\n\n\nclass ConvBlock(nn.Module):\n\n \"\"\"Convolution head\"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n padding: int,\n dilation: int,\n norm_layer: nn.Module = AdaptiveBatchNorm2d,\n ):\n \"\"\"\n @in_channels: number of input channels\n @out_channels: number of output channels\n @dilation: dilation factor @activation: 'relu'- relu,\n 'lrelu': leaky relu\n @norm_layer: 'bn': batch norm, 'in': instance norm, 'gn': group\n norm, 'an': adaptive norm\n \"\"\"\n super().__init__()\n convblk = []\n\n convblk.extend(\n [\n nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n ),\n nn.LeakyReLU(negative_slope=0.2),\n norm_layer(out_channels) if norm_layer is not None else nn.Identity(),\n ]\n )\n\n self.convblk = nn.Sequential(*convblk)\n self.init_weights(self.convblk)\n\n def identity_init(self, shape):\n array = np.zeros(shape, dtype=float)\n cx, cy = shape[2] // 2, shape[3] // 2\n for i in range(np.minimum(shape[0], shape[1])):\n array[i, i, cx, cy] = 1\n\n return array\n\n def init_weights(self, modules):\n for m in modules:\n if isinstance(m, nn.Conv2d):\n weights = self.identity_init(m.weight.shape)\n with torch.no_grad():\n m.weight.copy_(torch.from_numpy(weights).float())\n torch.nn.init.zeros_(m.bias)\n\n def forward(self, *inputs):\n return self.convblk(inputs[0])\n\n\nclass FIP(nn.Module):\n\n \"\"\"Model architecture for fast image filter\"\"\"\n\n def __init__(self):\n \"\"\"Initialization\"\"\"\n super().__init__()\n\n nbLayers = 24\n\n self.conv1 = ConvBlock(3, nbLayers, 3, 1, 1)\n self.conv2 = ConvBlock(nbLayers, nbLayers, 3, 2, 2)\n self.conv3 = ConvBlock(nbLayers, nbLayers, 3, 4, 4)\n self.conv4 = ConvBlock(nbLayers, nbLayers, 3, 8, 8)\n self.conv5 = ConvBlock(nbLayers, nbLayers, 3, 16, 16)\n self.conv6 = ConvBlock(nbLayers, nbLayers, 3, 32, 32)\n self.conv7 = ConvBlock(nbLayers, nbLayers, 3, 64, 64)\n self.conv8 = ConvBlock(nbLayers, nbLayers, 3, 1, 1)\n self.conv9 = nn.Conv2d(nbLayers, 3, kernel_size=1, dilation=1)\n\n self.weights_init(self.conv9)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.conv6(x)\n x = self.conv7(x)\n x = self.conv8(x)\n x = self.conv9(x)\n\n return x.clamp(0.0, 1.0)\n\n def weights_init(self, m):\n \"\"\"conv2d Init\"\"\"\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight)\n torch.nn.init.zeros_(m.bias)\n\n\nif __name__ == \"__main__\":\n model = FIP()\n print(model)\n"
] | [
[
"torch.zeros",
"torch.nn.Identity",
"numpy.zeros",
"numpy.minimum",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.xavier_uniform_",
"torch.no_grad",
"torch.ones",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.init.zeros_"
]
] |
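
Note: the identity_init scheme in the row above writes a Dirac delta into each kernel's centre tap, so every ConvBlock starts out as (approximately) an identity map. A quick way to sanity-check that idea in isolation is PyTorch's built-in torch.nn.init.dirac_, which builds the same kind of kernel. This is a minimal sketch for intuition, not code from the repository:

import torch
import torch.nn as nn

conv = nn.Conv2d(8, 8, kernel_size=3, padding=1)
with torch.no_grad():
    nn.init.dirac_(conv.weight)  # 1 at each kernel centre, like identity_init above
    nn.init.zeros_(conv.bias)

x = torch.randn(1, 8, 16, 16)
# with only the centre tap non-zero, the convolution reproduces its input
assert torch.allclose(conv(x), x, atol=1e-6)
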
ollitapa/pycolortools | [
"8e0d28411ccbd96de036e81845b217d05a4eee44"
] | [
"tester.py"
] | [
"#\n# Copyright 2015 Olli Tapaninen, VTT Technical Research Center of Finland\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pycolortools\nimport numpy as np\n\ncie = pycolortools.CIEXYZ()\nill = pycolortools.Illuminants()\n\nill.illuminantA(np.array([350, 360, 370]))\n\nprint(\"Test CCT: %f\" % cie.calculateCCT([0.3, 0.3, 0.3]))\nprint(\"Should be: 7732.054428\")\n"
] | [
[
"numpy.array"
]
] |
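
Note: a rough cross-check of the expected value in the test above. If the [0.3, 0.3, 0.3] argument to calculateCCT is read as chromaticity coordinates (x, y, ...), McCamy's cubic approximation lands within a percent of the quoted 7732 K. This sketch is only for intuition — pycolortools itself presumably uses a more exact Planckian-locus method, which would explain the small residual difference:

# McCamy's approximation: CCT from CIE 1931 (x, y) chromaticity
def cct_mccamy(x, y):
    n = (x - 0.3320) / (y - 0.1858)
    return -449.0 * n**3 + 3525.0 * n**2 - 6823.3 * n + 5520.33

print(cct_mccamy(0.3, 0.3))        # ~7719 K, close to the 7732 K expected above
print(cct_mccamy(0.3127, 0.3290))  # ~6505 K, the familiar D65 white point
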
Miffyli/minecraft-bc-2020 | [
"94f8706e547474a2ed8cacd41bb20e59f672215f"
] | [
"torch_codes/modules.py"
] | [
"#\n# PyTorch networks and modules\n#\n\nfrom collections import OrderedDict\nimport math\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n# References:\n# [1] IMPALA. https://arxiv.org/pdf/1802.01561.pdf\n# [2] R2D3. https://arxiv.org/pdf/1909.01387.pdf\n# [3] Unixpickle's work https://github.com/amiranas/minerl_imitation_learning/blob/master/model.py#L104\n\nclass ResidualBlock(nn.Module):\n \"\"\"\n Residual block from R2D3/IMPALA\n\n Taken from [1,2]\n \"\"\"\n\n def __init__(self, num_channels, first_conv_weight_scale):\n super().__init__()\n self.conv1 = nn.Conv2d(num_channels, num_channels,\n kernel_size=3, stride=1,\n padding=1, bias=False)\n\n self.conv2 = nn.Conv2d(num_channels, num_channels,\n kernel_size=3, stride=1,\n padding=1, bias=False)\n\n # Copy paste from [3]\n self.bias1 = nn.Parameter(torch.zeros([num_channels, 1, 1]))\n self.bias2 = nn.Parameter(torch.zeros([num_channels, 1, 1]))\n self.bias3 = nn.Parameter(torch.zeros([num_channels, 1, 1]))\n self.bias4 = nn.Parameter(torch.zeros([num_channels, 1, 1]))\n self.scale = nn.Parameter(torch.ones([num_channels, 1, 1]))\n\n # FixUp init (part of it):\n # - Final Convs in residual branches initialized\n # to zero\n # - Other convs in residual branches initialized\n # to a scaled value\n # - Biases handled manually as in [3]\n with torch.no_grad():\n self.conv2.weight *= 0\n self.conv1.weight *= first_conv_weight_scale\n\n def forward(self, x):\n x = F.relu(x, inplace=True)\n original = x\n\n # Copy/paste from [3]\n x = x + self.bias1\n x = self.conv1(x)\n x = x + self.bias2\n\n x = F.relu(x, inplace=True)\n\n x = x + self.bias3\n x = self.conv2(x)\n x = x * self.scale\n x = x + self.bias4\n\n return original + x\n\n\nclass ResNetHead(nn.Module):\n \"\"\"\n A small residual network CNN head for processing images.\n\n Architecture is from IMPALA paper in Fig 3 [1]\n \"\"\"\n\n def __init__(self, in_channels=3, filter_sizes=(16, 32, 32), add_extra_block=False):\n super().__init__()\n self.num_total_blocks = len(filter_sizes) + int(add_extra_block)\n self.blocks = []\n\n # Scaler for FixUp mid-most convolutions.\n # Scaling is L^(-1/(2m - 2)) . 
In our case m = 2 (two layers in branch),\n # so our rescaling is L^(-1/2) = 1 / sqrt(L).\n # L is number of residual branches in our network.\n # Each block in IMPALA has two branches.\n first_conv_weight_scale = 1 / (math.sqrt(self.num_total_blocks * 2))\n input_channels = in_channels\n for output_channels in filter_sizes:\n block = [\n nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1),\n nn.MaxPool2d(kernel_size=3, stride=2),\n ResidualBlock(output_channels, first_conv_weight_scale),\n ResidualBlock(output_channels, first_conv_weight_scale),\n ]\n self.blocks.extend(block)\n input_channels = output_channels\n # Number of blocks without max pooling\n if add_extra_block:\n self.blocks.extend((\n ResidualBlock(output_channels, first_conv_weight_scale),\n ResidualBlock(output_channels, first_conv_weight_scale)\n ))\n self.blocks = nn.Sequential(*self.blocks)\n\n def forward(self, x):\n x = self.blocks(x)\n # Flatten\n x = x.reshape(x.shape[0], -1)\n x = F.relu(x, inplace=True)\n return x\n\n\n# Some specific ResNet sizes\n# for different resolutions to avoid too\n# squeezed or large feature sizes.\ndef ResNetHeadFor64x64(in_channels):\n return ResNetHead(in_channels=in_channels)\n\n\ndef ResNetHeadFor32x32(in_channels):\n return ResNetHead(in_channels=in_channels, filter_sizes=(16, 32))\n\n\ndef ResNetHeadFor64x64DoubleFilters(in_channels):\n # As in [3]\n return ResNetHead(in_channels=in_channels, filter_sizes=(32, 64, 64))\n\n\ndef ResNetHeadFor64x64DoubleFiltersWithExtra(in_channels):\n # As in [3]\n return ResNetHead(in_channels=in_channels, filter_sizes=(32, 64, 64), add_extra_block=True)\n\n\ndef ResNetHeadFor64x64QuadrupleFilters(in_channels):\n return ResNetHead(in_channels=in_channels, filter_sizes=(64, 128, 128))\n\n\nclass NatureDQNHead(nn.Module):\n \"\"\"The CNN head from Nature DQN paper\"\"\"\n\n def __init__(self, in_channels=3):\n super().__init__()\n self.head = nn.Sequential(\n nn.Conv2d(in_channels, 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU(),\n )\n\n def forward(self, x):\n x = self.head(x)\n x = x.reshape(x.shape[0], -1)\n return x\n\n\nclass IMPALANetwork(nn.Module):\n \"\"\"\n IMPALA network [1] but without recurrence and\n with a bit different network sizes.\n I.e. it takes in an image and some 1D features\n (\"additional features\"). Image is processed\n through a small ResNet network, and other\n features are appended to this output\n later in the network.\n\n Output is a dictionary with same keys\n as output_dict, all outputs being linear\n activations\n \"\"\"\n\n def __init__(\n self,\n image_shape,\n output_dict,\n num_additional_features,\n cnn_head_class=\"ResNetHead\",\n latent_size=512,\n num_heads=None\n ):\n \"\"\"\n Parameters:\n image_shape (List): Shape of the input images in CxHxW\n output_dict (Dict of str: int): Names and dimensions of outputs\n to produce\n num_additional_features (int): Number of additional\n features appended later\n cnn_head_class (str): Name of the class (visible to here)\n to use as preprocess for images\n latent_size (int): Size of the latent vector after concatenating\n image and additional observations\n num_heads (int): Number of different output heads. 
None is converted\n to one.\n \"\"\"\n super().__init__()\n self.num_heads = num_heads if num_heads is not None else 1\n self.output_dict = OrderedDict(output_dict)\n self.total_output_size = sum(list(self.output_dict.values()))\n self.num_additional_features = num_additional_features\n # SuperSecure^{tm}\n cnn_head_class = eval(cnn_head_class)\n self.cnn_head = cnn_head_class(\n in_channels=image_shape[0],\n )\n\n # Run something through the network to get the shape\n self.cnn_feat_size = self.cnn_head(torch.rand(*((1,) + image_shape))).shape[1]\n\n # Layer for additional features\n self.feats_fc1 = nn.Linear(self.num_additional_features, 128)\n\n # Append additional features\n self.num_combined_features = self.cnn_feat_size + 128\n\n # Create outputs. All outputs are one big list.\n self.final_fcs = nn.ModuleList([\n nn.Sequential(\n nn.Linear(self.num_combined_features, latent_size),\n nn.ReLU(),\n nn.Linear(latent_size, self.total_output_size)\n )\n for i in range(self.num_heads)\n ])\n\n def forward(self, image_observation, additional_features, head_indeces=None):\n # Normalize image (uint8)\n x = image_observation.float() / 255.0\n\n x = self.cnn_head(x)\n\n # Process additional features through one layer\n additional_features = self.feats_fc1(additional_features)\n additional_features = F.relu(additional_features)\n\n x = torch.cat((x, additional_features), dim=1)\n\n if head_indeces is None:\n x = self.final_fcs[0](x)\n else:\n # Run different batch elements through different heads.\n # TODO this should probably be parallelized somehow...\n # TODO normalize gradients?\n out_x = torch.zeros(x.shape[0], self.total_output_size).to(x.device)\n for batch_i in range(x.shape[0]):\n out_x[batch_i] = self.final_fcs[head_indeces[batch_i]](x[batch_i])\n x = out_x\n\n # Split to different dicts according to output sizes\n outputs = {}\n i = 0\n for name, size in self.output_dict.items():\n outputs[name] = x[:, i:i + size]\n i += size\n\n return outputs\n\n\nclass IMPALANetworkWithLSTM(nn.Module):\n \"\"\"\n IMPALA network [1].\n I.e. it takes in an image and some 1D features\n (\"additional features\"). 
Image is processed\n through a small ResNet network, and other\n features are appended to this output\n later in the network.\n\n Output is a dictionary with same keys\n as output_dict, all outputs being linear\n activations.\n \"\"\"\n\n def __init__(\n self,\n image_shape,\n output_dict,\n num_additional_features,\n cnn_head_class=\"ResNetHead\",\n latent_size=512,\n num_heads=None\n ):\n \"\"\"\n Parameters:\n image_shape (List): Shape of the input images in CxHxW\n output_dict (Dict of str: int): Names and dimensions of outputs\n to produce\n num_additional_features (int): Number of additional\n features appended later\n cnn_head_class (str): Name of the class (visible to here)\n to use as preprocess for images\n latent_size (int): Number of units for the hidden LSTM state\n \"\"\"\n assert num_heads is None, \"Sub-tasks not supported for LSTM version yet\"\n super().__init__()\n self.latent_size = latent_size\n self.output_dict = OrderedDict(output_dict)\n self.total_output_size = sum(list(self.output_dict.values()))\n self.num_additional_features = num_additional_features\n # SuperSecure^{tm}\n cnn_head_class = eval(cnn_head_class)\n self.cnn_head = cnn_head_class(\n in_channels=image_shape[0],\n )\n\n # Run something through the network to get the shape\n self.cnn_feat_size = self.cnn_head(torch.rand(*((1,) + image_shape))).shape[1]\n self.cnn_fc = nn.Linear(self.cnn_feat_size, 128)\n\n # Append additional features\n self.num_combined_features = 128 + self.num_additional_features\n\n self.lstm = nn.LSTM(\n input_size=self.num_combined_features,\n hidden_size=self.latent_size,\n num_layers=1\n )\n\n # Do all outputs as one big list\n self.final_fc = nn.Linear(self.latent_size, self.total_output_size)\n\n def get_initial_state(self, batch_size):\n \"\"\"\n Return initial hidden state (at the start of the episode),\n i.e. zero vectors.\n \"\"\"\n device = self.lstm.weight_hh_l0.device\n h = torch.zeros(1, batch_size, self.latent_size).to(device)\n c = torch.zeros(1, batch_size, self.latent_size).to(device)\n return h, c\n\n def forward(self, image_observation, additional_features, hidden_states=None, head_indeces=None, return_sequence=False):\n \"\"\"\n Trajectory/timesteps first. Returns hidden states (h, c).\n \"\"\"\n assert hidden_states is not None, \"No hidden states provided\"\n assert head_indeces is None, \"Sub-tasks not supported for LSTM yet\"\n\n # Normalize image (uint8)\n x = image_observation.float() / 255.0\n\n # Flatten batch and timestep axis into one to run through\n # CNN head.\n # Tested to work as expected with tests/test_time_parallelization.py\n x_shape = x.shape\n x = x.reshape(x_shape[0] * x_shape[1], x_shape[2], x_shape[3], x_shape[4])\n x = self.cnn_head(x)\n # Fully-connected\n x = F.relu(self.cnn_fc(x), inplace=True)\n # Now return to original seq_len, batch_size dim\n x = x.view(x_shape[0], x_shape[1], -1)\n\n # Add additional features\n x = torch.cat((x, additional_features), dim=2)\n\n # Run through lstms\n x, hidden_states = self.lstm(x, hidden_states)\n\n if not return_sequence:\n # Restrict to only last element in sequence\n x = x[-1]\n\n # Aaaand final mapping to output\n x = self.final_fc(x)\n\n # Split to different dicts according to output sizes\n outputs = {}\n i = 0\n for name, size in self.output_dict.items():\n if return_sequence:\n # Go over sequence length as well\n outputs[name] = x[:, :, i:i + size]\n else:\n outputs[name] = x[:, i:i + size]\n i += size\n\n return outputs, hidden_states\n"
] | [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.rand",
"torch.nn.LSTM",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.no_grad",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
]
] |
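
Note: the FixUp-style scale derived in the comments of the row above can be checked numerically. This small sketch (not part of the repository) evaluates the general L^(-1/(2m-2)) expression and confirms it reduces to the 1/sqrt(L) that ResNetHead hard-codes when each residual branch has m = 2 layers:

import math

def fixup_first_conv_scale(num_total_blocks, layers_per_branch=2, branches_per_block=2):
    """General FixUp rescaling L**(-1/(2m - 2)) for the first conv of each branch."""
    L = num_total_blocks * branches_per_block  # residual branches in the network
    m = layers_per_branch
    return L ** (-1.0 / (2 * m - 2))

# default filter_sizes=(16, 32, 32) and no extra block -> 3 blocks, L = 6
assert math.isclose(fixup_first_conv_scale(3), 1 / math.sqrt(3 * 2))
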
rmardushall/openmc | [
"62d4f725b027f1b65755e5a150a153dd1deba481"
] | [
"openmc/material.py"
] | [
"from collections import OrderedDict\nfrom copy import deepcopy\nfrom numbers import Real, Integral\nimport warnings\nfrom xml.etree import ElementTree as ET\n\nfrom six import string_types\nimport numpy as np\n\nimport openmc\nimport openmc.data\nimport openmc.checkvalue as cv\nfrom openmc.clean_xml import sort_xml_elements, clean_xml_indentation\nfrom .mixin import IDManagerMixin\n\n\n# Units for density supported by OpenMC\nDENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/cm3', 'atom/b-cm', 'atom/cm3', 'sum',\n 'macro']\n\n\nclass Material(IDManagerMixin):\n \"\"\"A material composed of a collection of nuclides/elements.\n\n To create a material, one should create an instance of this class, add\n nuclides or elements with :meth:`Material.add_nuclide` or\n `Material.add_element`, respectively, and set the total material density\n with `Material.export_to_xml()`. The material can then be assigned to a cell\n using the :attr:`Cell.fill` attribute.\n\n Parameters\n ----------\n material_id : int, optional\n Unique identifier for the material. If not specified, an identifier will\n automatically be assigned.\n name : str, optional\n Name of the material. If not specified, the name will be the empty\n string.\n temperature : float, optional\n Temperature of the material in Kelvin. If not specified, the material\n inherits the default temperature applied to the model.\n\n Attributes\n ----------\n id : int\n Unique identifier for the material\n temperature : float\n Temperature of the material in Kelvin.\n density : float\n Density of the material (units defined separately)\n density_units : str\n Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/cm3',\n 'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only\n applies in the case of a multi-group calculation.\n depletable : bool\n Indicate whether the material is depletable. This attribute can be used\n by downstream depletion applications.\n elements : list of tuple\n List in which each item is a 4-tuple consisting of an\n :class:`openmc.Element` instance, the percent density, the percent\n type ('ao' or 'wo'), and enrichment.\n nuclides : list of tuple\n List in which each item is a 3-tuple consisting of an\n :class:`openmc.Nuclide` instance, the percent density, and the percent\n type ('ao' or 'wo').\n average_molar_mass : float\n The average molar mass of nuclides in the material in units of grams per\n mol. For example, UO2 with 3 nuclides will have an average molar mass\n of 270 / 3 = 90 g / mol.\n volume : float\n Volume of the material in cm^3. This can either be set manually or\n calculated in a stochastic volume calculation and added via the\n :meth:`Material.add_volume_information` method.\n paths : list of str\n The paths traversed through the CSG tree to reach each material\n instance. 
This property is initialized by calling the\n :meth:`Geometry.determine_paths` method.\n num_instances : int\n The number of instances of this material throughout the geometry.\n\n \"\"\"\n\n next_id = 1\n used_ids = set()\n\n def __init__(self, material_id=None, name='', temperature=None):\n # Initialize class attributes\n self.id = material_id\n self.name = name\n self.temperature = temperature\n self._density = None\n self._density_units = ''\n self._depletable = False\n self._paths = None\n self._num_instances = None\n self._volume = None\n self._atoms = {}\n\n # A list of tuples (nuclide, percent, percent type)\n self._nuclides = []\n\n # The single instance of Macroscopic data present in this material\n # (only one is allowed, hence this is different than _nuclides, etc)\n self._macroscopic = None\n\n # A list of tuples (element, percent, percent type, enrichment)\n self._elements = []\n\n # If specified, a list of table names\n self._sab = []\n\n # If true, the material will be initialized as distributed\n self._convert_to_distrib_comps = False\n\n # If specified, this file will be used instead of composition values\n self._distrib_otf_file = None\n\n def __eq__(self, other):\n if not isinstance(other, Material):\n return False\n elif self.id != other.id:\n return False\n elif self.name != other.name:\n return False\n # FIXME: We cannot compare densities since OpenMC outputs densities\n # in atom/b-cm in summary.h5 irregardless of input units, and we\n # cannot compute the sum percent in Python since we lack AWR\n #elif self.density != other.density:\n # return False\n #elif self._nuclides != other._nuclides:\n # return False\n #elif self._elements != other._elements:\n # return False\n elif self._sab != other._sab:\n return False\n else:\n return True\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(repr(self))\n\n def __repr__(self):\n string = 'Material\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tID', self._id)\n string += '{: <16}=\\t{}\\n'.format('\\tName', self._name)\n string += '{: <16}=\\t{}\\n'.format('\\tTemperature', self._temperature)\n\n string += '{: <16}=\\t{}'.format('\\tDensity', self._density)\n string += ' [{}]\\n'.format(self._density_units)\n\n string += '{: <16}\\n'.format('\\tS(a,b) Tables')\n\n for sab in self._sab:\n string += '{: <16}=\\t{}\\n'.format('\\tS(a,b)', sab)\n\n string += '{: <16}\\n'.format('\\tNuclides')\n\n for nuclide, percent, percent_type in self._nuclides:\n string += '{0: <16}'.format('\\t{0.name}'.format(nuclide))\n string += '=\\t{: <12} [{}]\\n'.format(percent, percent_type)\n\n if self._macroscopic is not None:\n string += '{: <16}\\n'.format('\\tMacroscopic Data')\n string += '{: <16}'.format('\\t{}'.format(self._macroscopic))\n\n string += '{: <16}\\n'.format('\\tElements')\n\n for element, percent, percent_type, enr in self._elements:\n string += '{0: <16}'.format('\\t{0.name}'.format(element))\n if enr is None:\n string += '=\\t{: <12} [{}]\\n'.format(percent, percent_type)\n else:\n string += '=\\t{: <12} [{}] @ {} w/o enrichment\\n'\\\n .format(percent, percent_type, enr)\n\n return string\n\n @property\n def name(self):\n return self._name\n\n @property\n def temperature(self):\n return self._temperature\n\n @property\n def density(self):\n return self._density\n\n @property\n def density_units(self):\n return self._density_units\n\n @property\n def depletable(self):\n return self._depletable\n\n @property\n def paths(self):\n if self._paths is None:\n raise ValueError('Material 
instance paths have not been determined. '\n 'Call the Geometry.determine_paths() method.')\n return self._paths\n\n @property\n def num_instances(self):\n if self._num_instances is None:\n raise ValueError(\n 'Number of material instances have not been determined. Call '\n 'the Geometry.determine_paths() method.')\n return self._num_instances\n\n @property\n def elements(self):\n return self._elements\n\n @property\n def nuclides(self):\n return self._nuclides\n\n @property\n def convert_to_distrib_comps(self):\n return self._convert_to_distrib_comps\n\n @property\n def distrib_otf_file(self):\n return self._distrib_otf_file\n\n @property\n def average_molar_mass(self):\n\n # Get a list of all the nuclides, with elements expanded\n nuclide_densities = self.get_nuclide_densities()\n\n # Using the sum of specified atomic or weight amounts as a basis, sum\n # the mass and moles of the material\n mass = 0.\n moles = 0.\n for nuc, vals in nuclide_densities.items():\n if vals[2] == 'ao':\n mass += vals[1] * openmc.data.atomic_mass(nuc)\n moles += vals[1]\n else:\n moles += vals[1] / openmc.data.atomic_mass(nuc)\n mass += vals[1]\n\n # Compute and return the molar mass\n return mass / moles\n\n @property\n def volume(self):\n return self._volume\n\n @name.setter\n def name(self, name):\n if name is not None:\n cv.check_type('name for Material ID=\"{}\"'.format(self._id),\n name, string_types)\n self._name = name\n else:\n self._name = ''\n\n @temperature.setter\n def temperature(self, temperature):\n cv.check_type('Temperature for Material ID=\"{}\"'.format(self._id),\n temperature, (Real, type(None)))\n self._temperature = temperature\n\n @depletable.setter\n def depletable(self, depletable):\n cv.check_type('Depletable flag for Material ID=\"{}\"'.format(self.id),\n depletable, bool)\n self._depletable = depletable\n\n @volume.setter\n def volume(self, volume):\n if volume is not None:\n cv.check_type('material volume', volume, Real)\n self._volume = volume\n\n @classmethod\n def from_hdf5(cls, group):\n \"\"\"Create material from HDF5 group\n\n Parameters\n ----------\n group : h5py.Group\n Group in HDF5 file\n\n Returns\n -------\n openmc.Material\n Material instance\n\n \"\"\"\n mat_id = int(group.name.split('/')[-1].lstrip('material '))\n\n name = group['name'].value.decode() if 'name' in group else ''\n density = group['atom_density'].value\n nuc_densities = group['nuclide_densities'][...]\n nuclides = group['nuclides'].value\n\n # Create the Material\n material = cls(mat_id, name)\n material.depletable = bool(group.attrs['depletable'])\n\n # Read the names of the S(a,b) tables for this Material and add them\n if 'sab_names' in group:\n sab_tables = group['sab_names'].value\n for sab_table in sab_tables:\n name = sab_table.decode()\n material.add_s_alpha_beta(name)\n\n # Set the Material's density to atom/b-cm as used by OpenMC\n material.set_density(density=density, units='atom/b-cm')\n\n # Add all nuclides to the Material\n for fullname, density in zip(nuclides, nuc_densities):\n name = fullname.decode().strip()\n material.add_nuclide(name, percent=density, percent_type='ao')\n\n return material\n\n def add_volume_information(self, volume_calc):\n \"\"\"Add volume information to a material.\n\n Parameters\n ----------\n volume_calc : openmc.VolumeCalculation\n Results from a stochastic volume calculation\n\n \"\"\"\n if volume_calc.domain_type == 'material':\n if self.id in volume_calc.volumes:\n self._volume = volume_calc.volumes[self.id][0]\n self._atoms = volume_calc.atoms[self.id]\n 
else:\n raise ValueError('No volume information found for this material.')\n else:\n raise ValueError('No volume information found for this material.')\n\n def set_density(self, units, density=None):\n \"\"\"Set the density of the material\n\n Parameters\n ----------\n units : {'g/cm3', 'g/cc', 'kg/cm3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}\n Physical units of density.\n density : float, optional\n Value of the density. Must be specified unless units is given as\n 'sum'.\n\n \"\"\"\n\n cv.check_value('density units', units, DENSITY_UNITS)\n self._density_units = units\n\n if units == 'sum':\n if density is not None:\n msg = 'Density \"{}\" for Material ID=\"{}\" is ignored ' \\\n 'because the unit is \"sum\"'.format(density, self.id)\n warnings.warn(msg)\n else:\n if density is None:\n msg = 'Unable to set the density for Material ID=\"{}\" ' \\\n 'because a density value must be given when not using ' \\\n '\"sum\" unit'.format(self.id)\n raise ValueError(msg)\n\n cv.check_type('the density for Material ID=\"{}\"'.format(self.id),\n density, Real)\n self._density = density\n\n @distrib_otf_file.setter\n def distrib_otf_file(self, filename):\n # TODO: remove this when distributed materials are merged\n warnings.warn('This feature is not yet implemented in a release '\n 'version of openmc')\n\n if not isinstance(filename, string_types) and filename is not None:\n msg = 'Unable to add OTF material file to Material ID=\"{}\" with a ' \\\n 'non-string name \"{}\"'.format(self._id, filename)\n raise ValueError(msg)\n\n self._distrib_otf_file = filename\n\n @convert_to_distrib_comps.setter\n def convert_to_distrib_comps(self):\n # TODO: remove this when distributed materials are merged\n warnings.warn('This feature is not yet implemented in a release '\n 'version of openmc')\n\n self._convert_to_distrib_comps = True\n\n def add_nuclide(self, nuclide, percent, percent_type='ao'):\n \"\"\"Add a nuclide to the material\n\n Parameters\n ----------\n nuclide : str or openmc.Nuclide\n Nuclide to add\n percent : float\n Atom or weight percent\n percent_type : {'ao', 'wo'}\n 'ao' for atom percent and 'wo' for weight percent\n\n \"\"\"\n\n if self._macroscopic is not None:\n msg = 'Unable to add a Nuclide to Material ID=\"{}\" as a ' \\\n 'macroscopic data-set has already been added'.format(self._id)\n raise ValueError(msg)\n\n if not isinstance(nuclide, string_types + (openmc.Nuclide,)):\n msg = 'Unable to add a Nuclide to Material ID=\"{}\" with a ' \\\n 'non-Nuclide value \"{}\"'.format(self._id, nuclide)\n raise ValueError(msg)\n\n elif not isinstance(percent, Real):\n msg = 'Unable to add a Nuclide to Material ID=\"{}\" with a ' \\\n 'non-floating point value \"{}\"'.format(self._id, percent)\n raise ValueError(msg)\n\n elif percent_type not in ['ao', 'wo', 'at/g-cm']:\n msg = 'Unable to add a Nuclide to Material ID=\"{}\" with a ' \\\n 'percent type \"{}\"'.format(self._id, percent_type)\n raise ValueError(msg)\n\n if isinstance(nuclide, openmc.Nuclide):\n # Copy this Nuclide to separate it from the Nuclide in\n # other Materials\n nuclide = deepcopy(nuclide)\n else:\n nuclide = openmc.Nuclide(nuclide)\n\n self._nuclides.append((nuclide, percent, percent_type))\n\n def remove_nuclide(self, nuclide):\n \"\"\"Remove a nuclide from the material\n\n Parameters\n ----------\n nuclide : openmc.Nuclide\n Nuclide to remove\n\n \"\"\"\n cv.check_type('nuclide', nuclide, string_types + (openmc.Nuclide,))\n\n if isinstance(nuclide, string_types):\n nuclide = openmc.Nuclide(nuclide)\n\n # If the Material 
contains the Nuclide, delete it\n for nuc in self._nuclides:\n if nuclide == nuc[0]:\n self._nuclides.remove(nuc)\n break\n\n def add_macroscopic(self, macroscopic):\n \"\"\"Add a macroscopic to the material. This will also set the\n density of the material to 1.0, unless it has been otherwise set,\n as a default for Macroscopic cross sections.\n\n Parameters\n ----------\n macroscopic : str or openmc.Macroscopic\n Macroscopic to add\n\n \"\"\"\n\n # Ensure no nuclides, elements, or sab are added since these would be\n # incompatible with macroscopics\n if self._nuclides or self._elements or self._sab:\n msg = 'Unable to add a Macroscopic data set to Material ID=\"{}\" ' \\\n 'with a macroscopic value \"{}\" as an incompatible data ' \\\n 'member (i.e., nuclide, element, or S(a,b) table) ' \\\n 'has already been added'.format(self._id, macroscopic)\n raise ValueError(msg)\n\n if not isinstance(macroscopic, string_types + (openmc.Macroscopic,)):\n msg = 'Unable to add a Macroscopic to Material ID=\"{}\" with a ' \\\n 'non-Macroscopic value \"{}\"'.format(self._id, macroscopic)\n raise ValueError(msg)\n\n if isinstance(macroscopic, openmc.Macroscopic):\n # Copy this Macroscopic to separate it from the Macroscopic in\n # other Materials\n macroscopic = deepcopy(macroscopic)\n else:\n macroscopic = openmc.Macroscopic(macroscopic)\n\n if self._macroscopic is None:\n self._macroscopic = macroscopic\n else:\n msg = 'Unable to add a Macroscopic to Material ID=\"{}\". ' \\\n 'Only one Macroscopic allowed per ' \\\n 'Material.'.format(self._id)\n raise ValueError(msg)\n\n # Generally speaking, the density for a macroscopic object will\n # be 1.0. Therefore, lets set density to 1.0 so that the user\n # doesnt need to set it unless its needed.\n # Of course, if the user has already set a value of density,\n # then we will not override it.\n if self._density is None:\n self.set_density('macro', 1.0)\n\n def remove_macroscopic(self, macroscopic):\n \"\"\"Remove a macroscopic from the material\n\n Parameters\n ----------\n macroscopic : openmc.Macroscopic\n Macroscopic to remove\n\n \"\"\"\n\n if not isinstance(macroscopic, openmc.Macroscopic):\n msg = 'Unable to remove a Macroscopic \"{}\" in Material ID=\"{}\" ' \\\n 'since it is not a Macroscopic'.format(self._id, macroscopic)\n raise ValueError(msg)\n\n # If the Material contains the Macroscopic, delete it\n if macroscopic.name == self._macroscopic.name:\n self._macroscopic = None\n\n def add_element(self, element, percent, percent_type='ao', enrichment=None):\n \"\"\"Add a natural element to the material\n\n Parameters\n ----------\n element : openmc.Element or str\n Element to add\n percent : float\n Atom or weight percent\n percent_type : {'ao', 'wo'}, optional\n 'ao' for atom percent and 'wo' for weight percent. Defaults to atom\n percent.\n enrichment : float, optional\n Enrichment for U235 in weight percent. For example, input 4.95 for\n 4.95 weight percent enriched U. 
Default is None\n (natural composition).\n\n \"\"\"\n\n if self._macroscopic is not None:\n msg = 'Unable to add an Element to Material ID=\"{}\" as a ' \\\n 'macroscopic data-set has already been added'.format(self._id)\n raise ValueError(msg)\n\n if not isinstance(element, string_types + (openmc.Element,)):\n msg = 'Unable to add an Element to Material ID=\"{}\" with a ' \\\n 'non-Element value \"{}\"'.format(self._id, element)\n raise ValueError(msg)\n\n if not isinstance(percent, Real):\n msg = 'Unable to add an Element to Material ID=\"{}\" with a ' \\\n 'non-floating point value \"{}\"'.format(self._id, percent)\n raise ValueError(msg)\n\n if percent_type not in ['ao', 'wo']:\n msg = 'Unable to add an Element to Material ID=\"{}\" with a ' \\\n 'percent type \"{}\"'.format(self._id, percent_type)\n raise ValueError(msg)\n\n # Copy this Element to separate it from same Element in other Materials\n if isinstance(element, openmc.Element):\n element = deepcopy(element)\n else:\n element = openmc.Element(element)\n\n if enrichment is not None:\n if not isinstance(enrichment, Real):\n msg = 'Unable to add an Element to Material ID=\"{}\" with a ' \\\n 'non-floating point enrichment value \"{}\"'\\\n .format(self._id, enrichment)\n raise ValueError(msg)\n\n elif element.name != 'U':\n msg = 'Unable to use enrichment for element {} which is not ' \\\n 'uranium for Material ID=\"{}\"'.format(element.name,\n self._id)\n raise ValueError(msg)\n\n # Check that the enrichment is in the valid range\n cv.check_less_than('enrichment', enrichment, 100./1.008)\n cv.check_greater_than('enrichment', enrichment, 0., equality=True)\n\n if enrichment > 5.0:\n msg = 'A uranium enrichment of {} was given for Material ID='\\\n '\"{}\". OpenMC assumes the U234/U235 mass ratio is '\\\n 'constant at 0.008, which is only valid at low ' \\\n 'enrichments. Consider setting the isotopic ' \\\n 'composition manually for enrichments over 5%.'.\\\n format(enrichment, self._id)\n warnings.warn(msg)\n\n self._elements.append((element, percent, percent_type, enrichment))\n\n def remove_element(self, element):\n \"\"\"Remove a natural element from the material\n\n Parameters\n ----------\n element : openmc.Element\n Element to remove\n\n \"\"\"\n cv.check_type('element', element, string_types + (openmc.Element,))\n\n if isinstance(element, string_types):\n element = openmc.Element(element)\n\n # If the Material contains the Element, delete it\n for elm in self._elements:\n if element == elm[0]:\n self._elements.remove(elm)\n\n def add_s_alpha_beta(self, name, fraction=1.0):\n r\"\"\"Add an :math:`S(\\alpha,\\beta)` table to the material\n\n Parameters\n ----------\n name : str\n Name of the :math:`S(\\alpha,\\beta)` table\n fraction : float\n The fraction of relevant nuclei that are affected by the\n :math:`S(\\alpha,\\beta)` table. 
For example, if the material is a\n block of carbon that is 60% graphite and 40% amorphous then add a\n graphite :math:`S(\\alpha,\\beta)` table with fraction=0.6.\n\n \"\"\"\n\n if self._macroscopic is not None:\n msg = 'Unable to add an S(a,b) table to Material ID=\"{}\" as a ' \\\n 'macroscopic data-set has already been added'.format(self._id)\n raise ValueError(msg)\n\n if not isinstance(name, string_types):\n msg = 'Unable to add an S(a,b) table to Material ID=\"{}\" with a ' \\\n 'non-string table name \"{}\"'.format(self._id, name)\n raise ValueError(msg)\n\n cv.check_type('S(a,b) fraction', fraction, Real)\n cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)\n cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)\n\n new_name = openmc.data.get_thermal_name(name)\n if new_name != name:\n msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \\\n 'Table \"{}\" is being renamed as \"{}\".'.format(name, new_name)\n warnings.warn(msg)\n\n self._sab.append((new_name, fraction))\n\n def make_isotropic_in_lab(self):\n for nuclide, percent, percent_type in self._nuclides:\n nuclide.scattering = 'iso-in-lab'\n for element, percent, percent_type, enrichment in self._elements:\n element.scattering = 'iso-in-lab'\n\n def get_nuclides(self):\n \"\"\"Returns all nuclides in the material\n\n Returns\n -------\n nuclides : list of str\n List of nuclide names\n\n \"\"\"\n\n nuclides = []\n\n for nuclide, percent, percent_type in self._nuclides:\n nuclides.append(nuclide.name)\n\n for ele, ele_pct, ele_pct_type, enr in self._elements:\n\n # Expand natural element into isotopes\n isotopes = ele.expand(ele_pct, ele_pct_type, enr)\n for iso, iso_pct, iso_pct_type in isotopes:\n nuclides.append(iso.name)\n\n return nuclides\n\n def get_nuclide_densities(self):\n \"\"\"Returns all nuclides in the material and their densities\n\n Returns\n -------\n nuclides : dict\n Dictionary whose keys are nuclide names and values are 3-tuples of\n (nuclide, density percent, density percent type)\n\n \"\"\"\n\n nuclides = OrderedDict()\n\n for nuclide, density, density_type in self._nuclides:\n nuclides[nuclide.name] = (nuclide, density, density_type)\n\n for ele, ele_pct, ele_pct_type, enr in self._elements:\n\n # Expand natural element into isotopes\n isotopes = ele.expand(ele_pct, ele_pct_type, enr)\n for iso, iso_pct, iso_pct_type in isotopes:\n nuclides[iso.name] = (iso, iso_pct, iso_pct_type)\n\n return nuclides\n\n def get_nuclide_atom_densities(self):\n \"\"\"Returns all nuclides in the material and their atomic densities in\n units of atom/b-cm\n\n Returns\n -------\n nuclides : dict\n Dictionary whose keys are nuclide names and values are tuples of\n (nuclide, density in atom/b-cm)\n\n \"\"\"\n\n # Expand elements in to nuclides\n nuclides = self.get_nuclide_densities()\n\n sum_density = False\n if self.density_units == 'sum':\n sum_density = True\n density = 0.\n elif self.density_units == 'macro':\n density = self.density\n elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':\n density = -self.density\n elif self.density_units == 'kg/m3':\n density = -0.001 * self.density\n elif self.density_units == 'atom/b-cm':\n density = self.density\n elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':\n density = 1.E-24 * self.density\n\n # For ease of processing split out nuc, nuc_density,\n # and nuc_density_type in to separate arrays\n nucs = []\n nuc_densities = []\n nuc_density_types = []\n\n for nuclide in nuclides.items():\n nuc, nuc_density, 
nuc_density_type = nuclide[1]\n nucs.append(nuc)\n nuc_densities.append(nuc_density)\n nuc_density_types.append(nuc_density_type)\n\n nucs = np.array(nucs)\n nuc_densities = np.array(nuc_densities)\n nuc_density_types = np.array(nuc_density_types)\n\n if sum_density:\n density = np.sum(nuc_densities)\n\n percent_in_atom = np.all(nuc_density_types == 'ao')\n density_in_atom = density > 0.\n sum_percent = 0.\n\n # Convert the weight amounts to atomic amounts\n if not percent_in_atom:\n for n, nuc in enumerate(nucs):\n nuc_densities[n] *= self.average_molar_mass / \\\n openmc.data.atomic_mass(nuc.name)\n\n # Now that we have the atomic amounts, lets finish calculating densities\n sum_percent = np.sum(nuc_densities)\n nuc_densities = nuc_densities / sum_percent\n\n # Convert the mass density to an atom density\n if not density_in_atom:\n density = -density / self.average_molar_mass * 1.E-24 \\\n * openmc.data.AVOGADRO\n\n nuc_densities = density * nuc_densities\n\n nuclides = OrderedDict()\n for n, nuc in enumerate(nucs):\n nuclides[nuc] = (nuc, nuc_densities[n])\n\n return nuclides\n\n def clone(self, memo=None):\n \"\"\"Create a copy of this material with a new unique ID.\n\n Parameters\n ----------\n memo : dict or None\n A nested dictionary of previously cloned objects. This parameter\n is used internally and should not be specified by the user.\n\n Returns\n -------\n clone : openmc.Material\n The clone of this material\n\n \"\"\"\n\n if memo is None:\n memo = {}\n\n # If no nemoize'd clone exists, instantiate one\n if self not in memo:\n # Temporarily remove paths -- this is done so that when the clone is\n # made, it doesn't create a copy of the paths (which are specific to\n # an instance)\n paths = self._paths\n self._paths = None\n\n clone = deepcopy(self)\n clone.id = None\n clone._num_instances = None\n\n # Restore paths on original instance\n self._paths = paths\n\n # Memoize the clone\n memo[self] = clone\n\n return memo[self]\n\n def _get_nuclide_xml(self, nuclide, distrib=False):\n xml_element = ET.Element(\"nuclide\")\n xml_element.set(\"name\", nuclide[0].name)\n\n if not distrib:\n if nuclide[2] == 'ao':\n xml_element.set(\"ao\", str(nuclide[1]))\n else:\n xml_element.set(\"wo\", str(nuclide[1]))\n\n if not nuclide[0].scattering is None:\n xml_element.set(\"scattering\", nuclide[0].scattering)\n\n return xml_element\n\n def _get_macroscopic_xml(self, macroscopic):\n xml_element = ET.Element(\"macroscopic\")\n xml_element.set(\"name\", macroscopic.name)\n\n return xml_element\n\n def _get_element_xml(self, element, cross_sections, distrib=False):\n\n # Get the nuclides in this element\n nuclides = element[0].expand(element[1], element[2], element[3],\n cross_sections)\n\n xml_elements = []\n for nuclide in nuclides:\n xml_elements.append(self._get_nuclide_xml(nuclide, distrib))\n\n return xml_elements\n\n def _get_nuclides_xml(self, nuclides, distrib=False):\n\n xml_elements = []\n\n for nuclide in nuclides:\n xml_elements.append(self._get_nuclide_xml(nuclide, distrib))\n\n return xml_elements\n\n def _get_elements_xml(self, elements, cross_sections, distrib=False):\n\n xml_elements = []\n\n for element in elements:\n nuclide_elements = self._get_element_xml(element, cross_sections,\n distrib)\n for nuclide_element in nuclide_elements:\n xml_elements.append(nuclide_element)\n\n return xml_elements\n\n def to_xml_element(self, cross_sections=None):\n \"\"\"Return XML representation of the material\n\n Parameters\n ----------\n cross_sections : str\n Path to an XML cross 
sections listing file\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing material data\n\n \"\"\"\n\n # Create Material XML element\n element = ET.Element(\"material\")\n element.set(\"id\", str(self._id))\n\n if len(self._name) > 0:\n element.set(\"name\", str(self._name))\n\n if self._depletable:\n element.set(\"depletable\", \"true\")\n\n # Create temperature XML subelement\n if self.temperature is not None:\n subelement = ET.SubElement(element, \"temperature\")\n subelement.text = str(self.temperature)\n\n # Create density XML subelement\n if self._density is not None or self._density_units == 'sum':\n subelement = ET.SubElement(element, \"density\")\n if self._density_units != 'sum':\n subelement.set(\"value\", str(self._density))\n subelement.set(\"units\", self._density_units)\n else:\n raise ValueError('Density has not been set for material {}!'\n .format(self.id))\n\n if not self._convert_to_distrib_comps:\n if self._macroscopic is None:\n # Create nuclide XML subelements\n subelements = self._get_nuclides_xml(self._nuclides)\n for subelement in subelements:\n element.append(subelement)\n\n # Create element XML subelements\n subelements = self._get_elements_xml(self._elements,\n cross_sections)\n for subelement in subelements:\n element.append(subelement)\n else:\n # Create macroscopic XML subelements\n subelement = self._get_macroscopic_xml(self._macroscopic)\n element.append(subelement)\n\n else:\n subelement = ET.SubElement(element, \"compositions\")\n\n comps = []\n allnucs = self._nuclides + self._elements\n dist_per_type = allnucs[0][2]\n for nuc in allnucs:\n if nuc[2] != dist_per_type:\n msg = 'All nuclides and elements in a distributed ' \\\n 'material must have the same type, either ao or wo'\n raise ValueError(msg)\n comps.append(nuc[1])\n\n if self._distrib_otf_file is None:\n # Create values and units subelements\n subsubelement = ET.SubElement(subelement, \"values\")\n subsubelement.text = ' '.join([str(c) for c in comps])\n subsubelement = ET.SubElement(subelement, \"units\")\n subsubelement.text = dist_per_type\n else:\n # Specify the materials file\n subsubelement = ET.SubElement(subelement, \"otf_file_path\")\n subsubelement.text = self._distrib_otf_file\n\n if self._macroscopic is None:\n # Create nuclide XML subelements\n subelements = self._get_nuclides_xml(self._nuclides,\n distrib=True)\n for subelement_nuc in subelements:\n subelement.append(subelement_nuc)\n\n # Create element XML subelements\n subelements = self._get_elements_xml(self._elements,\n cross_sections,\n distrib=True)\n for subsubelement in subelements:\n subelement.append(subsubelement)\n else:\n # Create macroscopic XML subelements\n subsubelement = self._get_macroscopic_xml(self._macroscopic)\n subelement.append(subsubelement)\n\n if len(self._sab) > 0:\n for sab in self._sab:\n subelement = ET.SubElement(element, \"sab\")\n subelement.set(\"name\", sab[0])\n if sab[1] != 1.0:\n subelement.set(\"fraction\", str(sab[1]))\n\n return element\n\n\nclass Materials(cv.CheckedList):\n \"\"\"Collection of Materials used for an OpenMC simulation.\n\n This class corresponds directly to the materials.xml input file. It can be\n thought of as a normal Python list where each member is a\n :class:`Material`. 
It behaves like a list as the following example\n demonstrates:\n\n >>> fuel = openmc.Material()\n >>> clad = openmc.Material()\n >>> water = openmc.Material()\n >>> m = openmc.Materials([fuel])\n >>> m.append(water)\n >>> m += [clad]\n\n Parameters\n ----------\n materials : Iterable of openmc.Material\n Materials to add to the collection\n cross_sections : str\n Indicates the path to an XML cross section listing file (usually named\n cross_sections.xml). If it is not set, the\n :envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for\n continuous-energy calculations and\n :envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group\n calculations to find the path to the HDF5 cross section file.\n multipole_library : str\n Indicates the path to a directory containing a windowed multipole\n cross section library. If it is not set, the\n :envvar:`OPENMC_MULTIPOLE_LIBRARY` environment variable will be used. A\n multipole library is optional.\n\n \"\"\"\n\n def __init__(self, materials=None):\n super(Materials, self).__init__(Material, 'materials collection')\n self._cross_sections = None\n self._multipole_library = None\n\n if materials is not None:\n self += materials\n\n @property\n def cross_sections(self):\n return self._cross_sections\n\n @property\n def multipole_library(self):\n return self._multipole_library\n\n @cross_sections.setter\n def cross_sections(self, cross_sections):\n cv.check_type('cross sections', cross_sections, string_types)\n self._cross_sections = cross_sections\n\n @multipole_library.setter\n def multipole_library(self, multipole_library):\n cv.check_type('cross sections', multipole_library, string_types)\n self._multipole_library = multipole_library\n\n def add_material(self, material):\n \"\"\"Append material to collection\n\n .. deprecated:: 0.8\n Use :meth:`Materials.append` instead.\n\n Parameters\n ----------\n material : openmc.Material\n Material to add\n\n \"\"\"\n warnings.warn(\"Materials.add_material(...) has been deprecated and may be \"\n \"removed in a future version. Use Material.append(...) \"\n \"instead.\", DeprecationWarning)\n self.append(material)\n\n def add_materials(self, materials):\n \"\"\"Add multiple materials to the collection\n\n .. deprecated:: 0.8\n Use compound assignment instead.\n\n Parameters\n ----------\n materials : Iterable of openmc.Material\n Materials to add\n\n \"\"\"\n warnings.warn(\"Materials.add_materials(...) has been deprecated and may be \"\n \"removed in a future version. Use compound assignment \"\n \"instead.\", DeprecationWarning)\n for material in materials:\n self.append(material)\n\n def append(self, material):\n \"\"\"Append material to collection\n\n Parameters\n ----------\n material : openmc.Material\n Material to append\n\n \"\"\"\n super(Materials, self).append(material)\n\n def insert(self, index, material):\n \"\"\"Insert material before index\n\n Parameters\n ----------\n index : int\n Index in list\n material : openmc.Material\n Material to insert\n\n \"\"\"\n super(Materials, self).insert(index, material)\n\n def remove_material(self, material):\n \"\"\"Remove a material from the file\n\n .. deprecated:: 0.8\n Use :meth:`Materials.remove` instead.\n\n Parameters\n ----------\n material : openmc.Material\n Material to remove\n\n \"\"\"\n warnings.warn(\"Materials.remove_material(...) has been deprecated and \"\n \"may be removed in a future version. Use \"\n \"Materials.remove(...) 
instead.\", DeprecationWarning)\n self.remove(material)\n\n def make_isotropic_in_lab(self):\n for material in self:\n material.make_isotropic_in_lab()\n\n def _create_material_subelements(self, root_element):\n for material in self:\n root_element.append(material.to_xml_element(self.cross_sections))\n\n def _create_cross_sections_subelement(self, root_element):\n if self._cross_sections is not None:\n element = ET.SubElement(root_element, \"cross_sections\")\n element.text = str(self._cross_sections)\n\n def _create_multipole_library_subelement(self, root_element):\n if self._multipole_library is not None:\n element = ET.SubElement(root_element, \"multipole_library\")\n element.text = str(self._multipole_library)\n\n def export_to_xml(self, path='materials.xml'):\n \"\"\"Export material collection to an XML file.\n\n Parameters\n ----------\n path : str\n Path to file to write. Defaults to 'materials.xml'.\n\n \"\"\"\n\n root_element = ET.Element(\"materials\")\n self._create_material_subelements(root_element)\n self._create_cross_sections_subelement(root_element)\n self._create_multipole_library_subelement(root_element)\n\n # Clean the indentation in the file to be user-readable\n sort_xml_elements(root_element)\n clean_xml_indentation(root_element)\n\n # Write the XML Tree to the materials.xml file\n tree = ET.ElementTree(root_element)\n tree.write(path, xml_declaration=True, encoding='utf-8', method=\"xml\")\n"
] | [
[
"numpy.all",
"numpy.sum",
"numpy.array"
]
] |
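
Note: the weight-to-atom conversion inside get_nuclide_atom_densities above distils to a few lines: divide each weight fraction by the nuclide's molar mass and renormalise (the average-molar-mass factor the method multiplies in cancels in the normalisation). A standalone sketch with approximate molar masses, for illustration only:

def weight_to_atom_fractions(weight_fracs, molar_masses):
    """Convert 'wo' fractions to normalised 'ao' fractions, as in Material above."""
    moles = {nuc: w / molar_masses[nuc] for nuc, w in weight_fracs.items()}
    total = sum(moles.values())
    return {nuc: n / total for nuc, n in moles.items()}

# approximate molar masses in g/mol
fracs = weight_to_atom_fractions(
    {"U235": 0.05, "U238": 0.95},
    {"U235": 235.0439, "U238": 238.0508})
# 5 w/o enriched uranium is ~5.06 a/o U235, since U235 is the lighter isotope
assert round(fracs["U235"], 4) == 0.0506
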
rusher321/RNA-seq-2019nCov | [
"b216b66ab618489dfa66686a813003107f3b8837"
] | [
"sample.py"
] | [
"#!/usr/bin/env python\nimport glob\nimport os\nimport pandas as pd\n\n\ndef samples_validator(sample_df, input_type, is_pe):\n error_count = 0\n for i in sample_df.index:\n if input_type == \"fastq\":\n if is_pe:\n fq1 = sample_df.loc[[i], \"fq1\"].dropna().tolist()\n fq2 = sample_df.loc[[i], \"fq2\"].dropna().tolist()\n for r1, r2 in zip(fq1, fq2):\n if (not os.path.exists(r1)) or (not os.path.exists(r2)):\n print(\"error:\\t%s\\t%s\\t%s\" % (i, r1, r2))\n error_count += 1\n else:\n fq = sample_df.loc[[i], \"fq1\"].dropna().tolist()\n for r in fq:\n if not os.path.exists(r):\n print(\"error:\\t%s\\t%s\" % (i, r))\n error_count += 1\n elif input_type == \"sra\":\n for sra in sample_df.loc[[i], \"sra\"].dropna().tolist():\n if not os.path.exists(sra):\n print(\"error:\\t%s\\t%s\" % (i, sra))\n error_count += 1\n else:\n print(\"wrong input type! just support fastq or sra\")\n return error_count\n\n\ndef parse_samples(samples_tsv, input_type, is_pe, check=True):\n samples_df = pd.read_csv(samples_tsv, sep='\\s+').set_index(\"id\", drop=False)\n if check:\n error_count = samples_validator(samples_df, input_type, is_pe)\n if error_count == 0:\n return samples_df\n else:\n print(\"find %d error\" % error_count)\n else:\n return samples_df\n\n\ndef parse_bins(bins_dir):\n bin_list = []\n for bin_ in glob.glob(bins_dir + \"/*/*bin*fa\"):\n bin_dict = dict()\n bin_dict[\"path\"] = bin_.strip()\n bin_dict[\"id\"] = os.path.basename(bin_).rstrip(\".fa\")\n bin_list.append(bin_dict)\n bins = pd.DataFrame(bin_list).set_index(\"id\", drop=False)\n return bins\n\n\ndef get_reads(sample_df, wildcards, col):\n return sample_df.loc[[wildcards.sample], col].dropna().tolist()\n\n\ndef get_sample_id(sample_df, wildcards, col):\n return sample_df.loc[wildcards.sample, [col]].dropna()[0]\n\n\ndef get_sample_id_(sample_df, wildcards, col):\n return sample_df.loc[wildcards.sample_, [col]].dropna()[0]\n\n\ndef get_bin_id(bin_df, wildcards, col):\n return bin_df.loc[wildcards.bin, [col]].dropna()[0]\n\n\ndef parse_cobin_samples_id(query_list):\n with open(query_list, 'r') as ih:\n samples_id = [line.strip() for line in ih]\n return samples_id\n\n\ndef renamed_id(samples_df, wildcards):\n return samples_df.loc[[wildcards.sample], \"id_2\"].dropna().tolist()[0]\n\n\ndef demultiplex_kraken2(kraken2_output, r1, r2, change_seq_id, prefix, log=None):\n start_time = time.time()\n taxid_counter = {}\n demultiplexer = {}\n\n with open(kraken2_output, 'r') as kh:\n for line in kh:\n cols = line.split('\\t')\n read_id = cols[1]\n tax_name = cols[2].split(\"(\")[0].strip()\n tax_id = int(cols[2].split(\"(\")[-1].split(\")\")[0].split()[-1])\n\n demultiplexer[read_id] = tax_id\n if tax_id in taxid_counter:\n taxid_counter[tax_id][1] += 1\n else:\n taxid_counter[tax_id] = [tax_name, 1]\n if not log is None:\n log_h.write(\"step_1: parse kraken2 output has spent %d s\\n\" % (time.time() - start_time))\n\n start_time = time.time()\n gzip_h = {}\n for i in taxid_counter:\n gzip_h[i] = {}\n gzip_h[i][\"r1\"] = bgzf.BgzfWriter(prefix + \".%d.1.fq.gz\" % i, 'wb')\n gzip_h[i][\"r2\"] = bgzf.BgzfWriter(prefix + \".%d.2.fq.gz\" % i, 'wb')\n\n if r1.endswith(\".gz\"):\n r1_h = gzip.open(r1, 'rt')\n r2_h = gzip.open(r2, 'rt')\n else:\n r1_h = open(r1, 'rt')\n r2_h = open(r2, 'rt')\n\n if change_seq_id:\n sample_tag = os.path.basename(prefix)\n\n if not log is None:\n log_h.write(\"step_2: begin demultiplex taxid-reads\\n\")\n for read_1, read_2 in zip(r1_h, r2_h):\n read_id = read_1[1:].split(\"/\")[0]\n if change_seq_id:\n 
gzip_h[demultiplexer[read_id]][\"r1\"].write(\">%s|%s%s%s%s\" %\n (\n sample_tag, read_1[1:], next(r1_h), next(r1_h), next(r1_h)\n ))\n gzip_h[demultiplexer[read_id]][\"r2\"].write(\">%s|%s%s%s%s\" %\n (\n sample_tag, read_2[1:], next(r2_h), next(r2_h), next(r2_h)\n ))\n else:\n gzip_h[demultiplexer[read_id]][\"r1\"].write(\"%s%s%s%s\" %\n (\n read_1, next(r1_h), next(r1_h), next(r1_h)\n ))\n gzip_h[demultiplexer[read_id]][\"r2\"].write(\"%s|%s%s%s%s\" %\n (\n read_2, next(r2_h), next(r2_h), next(r2_h)\n ))\n if not log is None:\n log_h.write(\"step_2: demultiplex taxid-reads has spent %d s\\n\" % (time.time() - start_time))\n"
] | [
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
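
Note: parse_samples in the row above expects a whitespace-separated sheet with an "id" column plus "fq1"/"fq2" (or "sra") path columns. Below is a tiny sketch of that expected shape and of the per-sample .loc lookup that get_reads performs; the file paths are placeholders, so samples_validator would flag them as missing:

import pandas as pd

samples_df = pd.DataFrame({
    "id":  ["s1", "s2"],
    "fq1": ["reads/s1_1.fq.gz", "reads/s2_1.fq.gz"],
    "fq2": ["reads/s1_2.fq.gz", "reads/s2_2.fq.gz"],
}).set_index("id", drop=False)

# the same .loc pattern get_reads() applies to a Snakemake wildcards object
print(samples_df.loc[["s1"], "fq1"].dropna().tolist())  # ['reads/s1_1.fq.gz']
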
mixxen/imgclsmob | [
"64e39e117327ca25bed8a98b9a81c397c76a8220"
] | [
"keras_/kerascv/models/squeezenet.py"
] | [
"\"\"\"\n SqueezeNet for ImageNet-1K, implemented in Keras.\n Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'\n https://arxiv.org/abs/1602.07360.\n\"\"\"\n\n__all__ = ['squeezenet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']\n\nimport os\nfrom tensorflow.keras import layers as nn\nfrom tensorflow.keras.models import Model\nfrom .common import maxpool2d, conv2d, is_channels_first, get_channel_axis, flatten\n\n\ndef fire_conv(x,\n in_channels,\n out_channels,\n kernel_size,\n padding,\n name=\"fire_conv\"):\n \"\"\"\n SqueezeNet specific convolution block.\n\n Parameters:\n ----------\n x : keras.backend tensor/variable/symbol\n Input tensor/variable/symbol.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n name : str, default 'fire_conv'\n Block name.\n\n Returns\n -------\n keras.backend tensor/variable/symbol\n Resulted tensor/variable/symbol.\n \"\"\"\n x = conv2d(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding,\n use_bias=True,\n name=name + \"/conv\")\n x = nn.Activation(\"relu\", name=name + \"/activ\")(x)\n return x\n\n\ndef fire_unit(x,\n in_channels,\n squeeze_channels,\n expand1x1_channels,\n expand3x3_channels,\n residual,\n name=\"fire_unit\"):\n \"\"\"\n SqueezeNet unit, so-called 'Fire' unit.\n\n Parameters:\n ----------\n x : keras.backend tensor/variable/symbol\n Input tensor/variable/symbol.\n in_channels : int\n Number of input channels.\n squeeze_channels : int\n Number of output channels for squeeze convolution blocks.\n expand1x1_channels : int\n Number of output channels for expand 1x1 convolution blocks.\n expand3x3_channels : int\n Number of output channels for expand 3x3 convolution blocks.\n residual : bool\n Whether use residual connection.\n name : str, default 'fire_unit'\n Block name.\n\n Returns\n -------\n keras.backend tensor/variable/symbol\n Resulted tensor/variable/symbol.\n \"\"\"\n if residual:\n identity = x\n\n x = fire_conv(\n x=x,\n in_channels=in_channels,\n out_channels=squeeze_channels,\n kernel_size=1,\n padding=0,\n name=name + \"/squeeze\")\n y1 = fire_conv(\n x=x,\n in_channels=squeeze_channels,\n out_channels=expand1x1_channels,\n kernel_size=1,\n padding=0,\n name=name + \"/expand1x1\")\n y2 = fire_conv(\n x=x,\n in_channels=squeeze_channels,\n out_channels=expand3x3_channels,\n kernel_size=3,\n padding=1,\n name=name + \"/expand3x3\")\n\n out = nn.concatenate([y1, y2], axis=get_channel_axis(), name=name + \"/concat\")\n\n if residual:\n out = nn.add([out, identity], name=name + \"/add\")\n\n return out\n\n\ndef squeeze_init_block(x,\n in_channels,\n out_channels,\n kernel_size,\n name=\"squeeze_init_block\"):\n \"\"\"\n ResNet specific initial block.\n\n Parameters:\n ----------\n x : keras.backend tensor/variable/symbol\n Input tensor/variable/symbol.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n name : str, default 'squeeze_init_block'\n Block name.\n\n Returns\n -------\n keras.backend tensor/variable/symbol\n Resulted tensor/variable/symbol.\n \"\"\"\n x = conv2d(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=2,\n 
use_bias=True,\n name=name + \"/conv\")\n x = nn.Activation(\"relu\", name=name + \"/activ\")(x)\n return x\n\n\ndef squeezenet(channels,\n residuals,\n init_block_kernel_size,\n init_block_channels,\n in_channels=3,\n in_size=(224, 224),\n classes=1000):\n \"\"\"\n SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'\n https://arxiv.org/abs/1602.07360.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n residuals : bool\n Whether to use residual units.\n init_block_kernel_size : int or tuple/list of 2 int\n The dimensions of the convolution window for the initial unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\\\n (in_size[0], in_size[1], in_channels)\n input = nn.Input(shape=input_shape)\n\n x = squeeze_init_block(\n x=input,\n in_channels=in_channels,\n out_channels=init_block_channels,\n kernel_size=init_block_kernel_size,\n name=\"features/init_block\")\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n x = maxpool2d(\n x=x,\n pool_size=3,\n strides=2,\n ceil_mode=True,\n name=\"features/pool{}\".format(i + 1))\n for j, out_channels in enumerate(channels_per_stage):\n expand_channels = out_channels // 2\n squeeze_channels = out_channels // 8\n x = fire_unit(\n x=x,\n in_channels=in_channels,\n squeeze_channels=squeeze_channels,\n expand1x1_channels=expand_channels,\n expand3x3_channels=expand_channels,\n residual=((residuals is not None) and (residuals[i][j] == 1)),\n name=\"features/stage{}/unit{}\".format(i + 1, j + 1))\n in_channels = out_channels\n x = nn.Dropout(\n rate=0.5,\n name=\"features/dropout\")(x)\n\n x = nn.Conv2D(\n filters=classes,\n kernel_size=1,\n name=\"output/final_conv\")(x)\n x = nn.Activation(\"relu\", name=\"output/final_activ\")(x)\n x = nn.AvgPool2D(\n pool_size=13,\n strides=1,\n name=\"output/final_pool\")(x)\n # x = nn.Flatten()(x)\n x = flatten(x)\n\n model = Model(inputs=input, outputs=x)\n model.in_size = in_size\n model.classes = classes\n return model\n\n\ndef get_squeezenet(version,\n residual=False,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".keras\", \"models\"),\n **kwargs):\n \"\"\"\n Create SqueezeNet model with specific parameters.\n\n Parameters:\n ----------\n version : str\n Version of SqueezeNet ('1.0' or '1.1').\n residual : bool, default False\n Whether to use residual connections.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if version == '1.0':\n channels = [[128, 128, 256], [256, 384, 384, 512], [512]]\n residuals = [[0, 1, 0], [1, 0, 1, 0], [1]]\n init_block_kernel_size = 7\n init_block_channels = 96\n elif version == '1.1':\n channels = [[128, 128], [256, 256], [384, 384, 512, 512]]\n residuals = [[0, 1], [0, 1], [0, 1, 0, 1]]\n init_block_kernel_size = 3\n init_block_channels = 64\n else:\n raise ValueError(\"Unsupported SqueezeNet version {}\".format(version))\n\n if not residual:\n residuals = 
None\n\n net = squeezenet(\n channels=channels,\n residuals=residuals,\n init_block_kernel_size=init_block_kernel_size,\n init_block_channels=init_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef squeezenet_v1_0(**kwargs):\n \"\"\"\n SqueezeNet 'vanilla' model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model\n size,' https://arxiv.org/abs/1602.07360.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenet(version=\"1.0\", residual=False, model_name=\"squeezenet_v1_0\", **kwargs)\n\n\ndef squeezenet_v1_1(**kwargs):\n \"\"\"\n SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model\n size,' https://arxiv.org/abs/1602.07360.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenet(version=\"1.1\", residual=False, model_name=\"squeezenet_v1_1\", **kwargs)\n\n\ndef squeezeresnet_v1_0(**kwargs):\n \"\"\"\n SqueezeNet model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and\n <0.5MB model size,' https://arxiv.org/abs/1602.07360.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenet(version=\"1.0\", residual=True, model_name=\"squeezeresnet_v1_0\", **kwargs)\n\n\ndef squeezeresnet_v1_1(**kwargs):\n \"\"\"\n SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters\n and <0.5MB model size,' https://arxiv.org/abs/1602.07360.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenet(version=\"1.1\", residual=True, model_name=\"squeezeresnet_v1_1\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import keras\n\n pretrained = False\n\n models = [\n squeezenet_v1_0,\n squeezenet_v1_1,\n squeezeresnet_v1_0,\n squeezeresnet_v1_1,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n # net.summary()\n weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != squeezenet_v1_0 or weight_count == 1248424)\n assert (model != squeezenet_v1_1 or weight_count == 1235496)\n assert (model != squeezeresnet_v1_0 or weight_count == 1248424)\n assert (model != squeezeresnet_v1_1 or weight_count == 1235496)\n\n if is_channels_first():\n x = np.zeros((1, 3, 224, 224), np.float32)\n else:\n x = np.zeros((1, 224, 224, 3), np.float32)\n y = net.predict(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Input",
"numpy.zeros",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.AvgPool2D"
]
] |
mycarta/bruges | [
"4b7dd42e96d477ffaaedd9134f9f7b7b60dd7123"
] | [
"bruges/util/util.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nUtility functions.\n\n:copyright: 2015 Agile Geoscience\n:license: Apache 2.0\n\"\"\"\nimport functools\nimport inspect\nimport warnings\n\nimport scipy.signal\nimport numpy as np\n\n\ngreek = {\n 'Alpha': 'Α',\n 'Beta': 'Β',\n 'Gamma': 'Γ',\n 'Delta': 'Δ',\n 'Epsilon': 'Ε',\n 'Zeta': 'Ζ',\n 'Eta': 'Η',\n 'Kappa': 'Κ',\n 'Lambda': 'Λ',\n 'Mu': 'Μ',\n 'Nu': 'Ν',\n 'Phi': 'Φ',\n 'Pi': 'Π',\n 'Rho': 'Ρ',\n 'Sigma': 'Σ',\n 'Tau': 'Τ',\n 'Upsilon': 'Υ',\n 'Theta': 'Θ',\n 'Chi': 'Χ',\n 'Psi': 'Ψ',\n 'Omega': 'Ω',\n 'alpha': 'α',\n 'beta': 'β',\n 'gamma': 'γ',\n 'delta': 'δ',\n 'epsilon': 'ε',\n 'zeta': 'ζ',\n 'eta': 'η',\n 'theta': 'θ',\n 'kappa': 'κ',\n 'lambda': 'λ',\n 'mu': 'μ',\n 'nu': 'ν',\n 'pi': 'π',\n 'rho': 'ρ',\n 'sigma': 'σ',\n 'tau': 'τ',\n 'upsilon': 'υ',\n 'phi': 'φ',\n 'chi': 'χ',\n 'psi': 'ψ',\n 'omega': 'ω',\n}\n\n\ndef deprecated(instructions):\n \"\"\"\n Flags a method as deprecated. This decorator can be used to mark functions\n as deprecated. It will result in a warning being emitted when the function\n is used.\n\n Args:\n instructions (str): A human-friendly string of instructions, such\n as: 'Please migrate to add_proxy() ASAP.'\n\n Returns:\n The decorated function.\n \"\"\"\n def decorator(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n message = 'Call to deprecated function {}. {}'.format(\n func.__name__,\n instructions)\n\n frame = inspect.currentframe().f_back\n\n warnings.warn_explicit(message,\n category=DeprecationWarning,\n filename=inspect.getfile(frame.f_code),\n lineno=frame.f_lineno)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef rms(a, axis=None):\n \"\"\"\n Calculates the RMS of an array.\n\n Args:\n a (ndarray). A sequence of numbers to apply the RMS to.\n axis (int). The axis along which to compute. If not given or None,\n the RMS for the whole array is computed.\n\n Returns:\n ndarray: The RMS of the array along the desired axis or axes.\n \"\"\"\n a = np.array(a)\n if axis is None:\n div = a.size\n else:\n div = a.shape[axis]\n ms = np.sum(a**2.0, axis=axis) / div\n return np.sqrt(ms)\n\n\ndef moving_average(a, length, mode='same'):\n \"\"\"\n Computes the mean in a moving window using convolution. For an alternative,\n as well as other kinds of average (median, mode, etc.), see bruges.filters.\n\n Example:\n >>> test = np.array([1,1,9,9,9,9,9,2,3,9,2,2,np.nan,1,1,1,1])\n >>> moving_average(test, 5, mode='same')\n array([ 2.2, 4. , 5.8, 7.4, 9. , 7.6, 6.4, 6.4, 5. , 3.6, nan,\n nan, nan, nan, nan, 0.8, 0.6])\n \"\"\"\n padded = np.pad(a, int(length/2), mode='edge')\n boxcar = np.ones(int(length))/length\n smooth = np.convolve(padded, boxcar, mode='same')\n return smooth[int(length/2):-int(length/2)]\n\n\n@deprecated(\"Use bruges.filters() for moving linear and nonlinear statistics\")\ndef moving_avg_conv(a, length, mode='same'):\n \"\"\"\n Moving average via convolution. Keeping it for now for compatibility.\n \"\"\"\n boxcar = np.ones(length)/length\n return np.convolve(a, boxcar, mode=mode)\n\n\n@deprecated(\"Use bruges.filters() for moving linear and nonlinear statistics\")\ndef moving_avg_fft(a, length, mode='same'):\n \"\"\"\n Moving average via FFT convolution. 
Keeping it for now for compatibility.\n\n \"\"\"\n boxcar = np.ones(length)/length\n return scipy.signal.fftconvolve(a, boxcar, mode=mode)\n\n\ndef normalize(a, new_min=0.0, new_max=1.0):\n \"\"\"\n Normalize an array to [0,1] or to arbitrary new min and max.\n\n Args:\n a (ndarray): An array.\n new_min (float): The new min to scale to, default 0.\n new_max (float): The new max to scale to, default 1.\n\n Returns:\n ndarray. The normalized array.\n \"\"\"\n a = np.array(a, dtype=float)\n n = (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))\n return n * (new_max - new_min) + new_min\n\n\ndef nearest(a, num):\n \"\"\"\n Finds the array's nearest value to a given num.\n\n Args:\n a (ndarray): An array.\n num (float): The value to find the nearest to.\n\n Returns:\n float. The array value nearest to num.\n \"\"\"\n a = np.array(a, dtype=float)\n return a.flat[np.abs(a - num).argmin()]\n\n\ndef next_pow2(num):\n \"\"\"\n Calculates the next nearest power of 2 to the input. Uses\n 2**ceil( log2( num ) ).\n\n Args:\n num (number): The number to round to the next power of two.\n\n Returns:\n number. The next power of 2 closest to num.\n \"\"\"\n\n return int(2**np.ceil(np.log2(num)))\n\n\ndef top_and_tail(*arrays):\n \"\"\"\n Top and tail all arrays to the non-NaN extent of the first array.\n\n E.g. crop the NaNs from the top and tail of a well log.\n\n Args:\n arrays (list): A list of arrays to treat.\n\n Returns:\n list: A list of treated arrays.\n \"\"\"\n if len(arrays) > 1:\n for arr in arrays[1:]:\n assert len(arr) == len(arrays[0])\n nans = np.where(~np.isnan(arrays[0]))[0]\n first, last = nans[0], nans[-1]\n return [array[first:last+1] for array in arrays]\n\n\ndef extrapolate(a):\n \"\"\"\n Extrapolate up and down an array from the first and last non-NaN samples.\n\n E.g. Continue the first and last non-NaN values of a log up and down.\n\n Args:\n a (ndarray): The array to treat.\n\n Returns:\n ndarray: The treated array.\n \"\"\"\n a = np.array(a)\n nans = np.where(~np.isnan(a))[0]\n first, last = nans[0], nans[-1]\n a[:first] = a[first]\n a[last + 1:] = a[last]\n return a\n"
] | [
[
"numpy.array",
"numpy.isnan",
"numpy.sum",
"numpy.ones",
"numpy.nanmin",
"numpy.sqrt",
"numpy.abs",
"numpy.log2",
"numpy.convolve"
]
] |
DanNduati/Parking-Management-System | [
"0bd9c254c49f9685b4442fbec43e36b5fb2b471b"
] | [
"tests/car_detection/car_detect.py"
] | [
"import os\nimport cv2\nimport numpy as np\nfrom os.path import join, dirname\n\n# Trained XML car classifier that describes some features of cars which we want to detect\ncascade_file = join(dirname(__file__), \"haarcascade_car.xml\")\ncars_cascade = cv2.CascadeClassifier(cascade_file)\nvideos_directory = join(os.getcwd(), \"videos\")\nvideo_file = \"cars.mp4\"\nwriter = cv2.VideoWriter_fourcc(*'MJPG')\n\n\ndef main():\n video = cv2.VideoCapture(f\"{join(videos_directory,video_file)}\")\n # create vi\n out_file = join(videos_directory, \"output.avi\")\n out = cv2.VideoWriter(out_file, writer,\n 30, (1280, 720))\n while video.isOpened():\n ret, frame = video.read()\n '''\n # get the frame dimensions to use in the VideoWriter object\n fshape = frame.shape\n fheight = fshape[0]\n fwidth = fshape[1]\n print(fwidth, fheight)\n '''\n controlkey = cv2.waitKey(1)\n if ret:\n cars = cars_cascade.detectMultiScale(frame, 1.25, 4)\n for (x, y, w, h) in cars:\n cv2.rectangle(frame, (x, y), (x+w, y+h),\n color=(0, 255, 0), thickness=2)\n out.write(frame)\n cv2.imshow('frame', frame)\n else:\n break\n\n if controlkey == ord('q'):\n break\n video.release()\n out.release()\n cv2.destroyAllWindows()\n\n\ndef test():\n # test video saving\n writer = cv2.VideoWriter(\"output.avi\",\n cv2.VideoWriter_fourcc(*\"MJPG\"), 30, (640, 480))\n for frame in range(1000):\n writer.write(np.random.randint(0, 255, (480, 640, 3)).astype('uint8'))\n writer.release()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.randint"
]
] |
djp42/IntentionPrediction | [
"9f260133f4b649e446166775b54885147d78393c"
] | [
"deprecated/signals_util.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 10 10:06:07 2016\n\n@author: LordPhillips\n\ntraffic signal utilities\n\n\"\"\"\n\nfrom utils import vehicleclass as v\nfrom utils import driver_util as dru\nfrom utils import constants as c\nfrom utils import data2_class as dd2\nfrom utils import frame_util as fut\nimport matplotlib.pyplot as plt\nimport datetime\n\n'''the NGSIM data had the intersections labelled as 87-90 for the signal\ntiming sheets, but for the trajectory data they are 1-4. This converts either way'''\ndef convertIntersection(number):\n if number > 10:\n return number-86\n else:\n return number+86\n\n'''The signals have a special timestamp that is just the hour:minute:second,\nso I have to add the constants.year,month,day to make a datetime object'''\ndef convertTimeStamp(stringTime):\n hour = int(stringTime[:2])\n minute = int(stringTime[3:5])\n second = int(stringTime[6:8])\n return datetime.datetime(c.Year,c.Month,c.Day,hour,minute,second)\n \n'''read my handmade time splits. \nreturns dictionary:\n {intersection: {time: status} }\n each intersection has an entry for every second and \n the status is a list of what 'directions' are green \n (need to look at signals constants to decode)\nDISCLAIMER -- lots of file reading heuristics specific to my file format, sorry...'''\ndef readSignalTimeSplit(filepath):\n signalFile = open(filepath, 'r')\n lines = signalFile.readlines()\n numLines = len(lines)\n lineCounter = 0\n signalDict = {}\n intersectionStarted = False\n timeChk = convertTimeStamp(\"00:00:00\")\n curInter = 0\n counter2 = 0\n for line in lines:\n lineCounter = lineCounter + 1\n if lineCounter % int(numLines/10) == 0:\n print(\"Done\", lineCounter, \"/\", numLines, \"lines...\")\n if \"Intersection\" in line:\n intersectionStarted = True\n arr = line.split()\n interNum = convertIntersection(int(arr[1]))\n signalDict[interNum] = {}\n curInter = interNum\n continue\n elif not intersectionStarted or \"TIME\" in line:\n continue\n elif line in ['\\n', '\\r\\n'] or line == '':\n print(\"Done with intersection:\",curInter)\n intersectionStarted = False\n timeChk = convertTimeStamp(\"00:00:00\")\n continue\n elif not intersectionStarted:\n continue\n elif line[0] == \"#\": # a comment or something\n continue\n if line[0] == \"+\": #at a time split\n arr = line.split()\n duration = int(arr[0][1:])\n for dt in range(0,duration):\n if timeChk.second == 59:\n if timeChk.minute == 59:\n timeChk = timeChk.replace(hour=timeChk.hour+1,minute=0,second=0)\n else:\n timeChk = timeChk.replace(minute=timeChk.minute+1,second=0)\n else:\n timeChk = timeChk.replace(second=timeChk.second+1)\n if len(arr) > 1: #if nothing, means just delay\n signalDict[curInter][timeChk] = arr[1].split(',')\n counter2 = counter2 + 1\n else: #at a cycle of 100 sec start => update time checkpoint\n arr = line.split()\n giventime = arr[0]\n timeChk = convertTimeStamp(giventime) #dont update, make new\n print(counter2)\n return signalDict\n \ndef convertDirToNum(curInter, green):\n if curInter == 1:\n if green == 'SB':\n return 2\n elif green == 'NB':\n return 6\n elif green == 'WB':\n return 4\n return 0\n elif curInter == 2:\n if green == 'SB':\n return 6\n elif green == 'NB':\n return 2\n elif green == 'WB':\n return 3\n elif green == 'EB':\n return 4\n elif green == \"SBLT\":\n return 1\n elif green == \"NBLT\":\n return 5\n return 0\n elif curInter == 3 or curInter == 4:\n if green == 'SB':\n return 2\n elif green == 'NB':\n return 6\n elif green == 'WB':\n return 3\n elif green == 'EB':\n 
return 4\n elif green == \"SBLT\":\n return 5\n return 0\n return -1\n \n \n'''read my handmade time splits in format 2. \nreturns dictionary:\n {intersection: {time: status} }\n the file has \"intersection\" then a line for time and then a line for \n what direction is currently green\nDISCLAIMER -- lots of file reading heuristics specific to my file format, sorry...'''\ndef readSignalTimeSplit2(filepath):\n signalFile = open(filepath, 'r')\n lines = signalFile.readlines()\n numLines = len(lines)\n lineCounter = 0\n signalDict = {}\n intersectionStarted = False\n timeChk = convertTimeStamp(\"00:00:00\")\n curInter = 0\n counter2 = 0\n line = ''\n for next_line in lines:\n lineCounter = lineCounter + 1\n if lineCounter % int(numLines/10) == 0:\n print(\"Done\", lineCounter, \"/\", numLines, \"lines...\")\n if next_line == '' or len(next_line.split()) == 0: #last entry is fluff\n print(\"Done with intersection:\",curInter)\n intersectionStarted = False\n timeChk = convertTimeStamp(\"00:00:00\")\n curInter = 0\n elif \"Intersection\" in line:\n intersectionStarted = True\n arr = line.split()\n interNum = int(arr[1])\n signalDict[interNum] = {}\n curInter = interNum\n elif intersectionStarted and not line[0] == '#': #line.split()[0] = time\n arr = line.split()\n giventime = arr[0]\n timeChk = convertTimeStamp(giventime) #dont update, make new\n nextarr = next_line.split()\n nextime = nextarr[0]\n nextdtime = convertTimeStamp(nextime)\n duration = int((nextdtime-timeChk).total_seconds())\n for dt in range(0,duration-1): #1 second delay seems fine\n if timeChk.second == 59:\n if timeChk.minute == 59:\n timeChk = timeChk.replace(hour=timeChk.hour+1,minute=0,second=0)\n else:\n timeChk = timeChk.replace(minute=timeChk.minute+1,second=0)\n else:\n timeChk = timeChk.replace(second=timeChk.second+1)\n if len(arr) > 1: #if nothing, means just delay\n greens = arr[1].split(',')\n newgreens = []\n for green in greens:\n newgreens.append(convertDirToNum(curInter, green))\n signalDict[curInter][timeChk] = newgreens\n counter2 = counter2 + 1\n line = next_line\n print(counter2)\n return signalDict\n \n \n'''will visualize the data with the status of each light.\nFor validation purposes does not display frame until user enters new line.\nDoes every 5th frame, aka every half second. This allows for less frames. 
'''\ndef visualizeForValidation(filepath, signalFilepath=None, formattype=1):\n driver_data = dd2.data2(filepath)\n if formattype == 1:\n signalDict = readSignalTimeSplit(signalFilepath)\n elif formattype == 2:\n signalDict = readSignalTimeSplit2(signalFilepath)\n else:\n signalDict = {}\n frames = driver_data.getFrames()\n useFrames = frames[0::10]\n wrongFrames = {}\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 3\n fig_size[1] = 10\n plt.rcParams[\"figure.figsize\"] = fig_size\n plt.figure(1)\n index = 0\n while (True):\n fid = useFrames[index]\n timemark = visualize_frame_and_signals(driver_data.frameDict[fid], signalDict, fid)\n command = input(\"Press Enter to continue, q to quit, m to mark frame for review: \")\n if 'goto' in command:\n gofid = int(command.split()[1])\n if gofid in useFrames:\n index = useFrames.index(gofid)\n else:\n print(\"Invalid frame\")\n else:\n index = index + 1\n if command == 'q':\n break\n if 'print time' in command:\n print(timemark)\n if 'm - ' in command: #add a message\n wrongFrames[fid] = timemark\n if len(command) > 1:\n wrongFrames[fid] = [timemark, command[1:]]\n print(\"fid set to:\",fid) \n print(wrongFrames)\n return wrongFrames\n\ndef visualize_frame_and_signals(frame, signalDict, fid):\n timeStr = v.vehicle(frame[list(frame.keys())[0]]).getGlobalT()\n frame_date_time = dru.convertNGSIMtimestampToDateTime(timeStr)\n #signals['green'].x/y, signals['red'].x/y ---- NOTE assumes no yellows...\n signalCordsColors = {'green':{'x':[], 'y':[] } , 'red':{'x':[],'y':[]} }\n for intersection in signalDict.keys():\n if not frame_date_time in signalDict[intersection].keys():\n mindis = 999999\n closest = None\n for date_time in signalDict[intersection]:\n dist = abs(frame_date_time.hour - date_time.hour) * 60 * 60\n dist = dist + abs(frame_date_time.minute - date_time.minute) * 60\n dist = dist + abs(frame_date_time.second - date_time.second)\n dist = dist + abs(frame_date_time.microsecond - date_time.microsecond) * 0.001\n if dist < mindis:\n closest = date_time\n mindis = dist\n greens = signalDict[intersection][closest]\n else:\n greens = signalDict[intersection][frame_date_time]\n cords = getCords(intersection)\n for ident in greens:\n ident = int(ident)\n if ident not in cords.keys():\n print(\"Not a light\")\n continue #not a light, likely ped signal\n signalCordsColors['green']['x'].append(cords[ident][0])\n signalCordsColors['green']['y'].append(cords[ident][1])\n for key in cords.keys():\n if str(key) not in greens and key not in greens:\n signalCordsColors['red']['x'].append(cords[key][0])\n signalCordsColors['red']['y'].append(cords[key][1]) \n fut.plotFrame(frame, fid, signals=signalCordsColors)\n plt.clf()\n return frame_date_time\n\ndef getCords(intersectionNumber):\n if intersectionNumber == 1:\n return {2: (-10,105), 4: (40,85), 6: (15,70)}\n elif intersectionNumber == 2:\n return {1:(-10,510), 2: (20,400), 3:(40,475), 4: (-45,440), 5:(10,400), 6: (-20,500)}\n elif intersectionNumber == 3:\n return {2: (-25,1100), 3:(40,1090), 4: (-45,1060), 5:(-15,1100), 6: (10,1045)}\n elif intersectionNumber == 4:\n return {2: (-15,1600), 3:(40,1590), 4: (-45,1560), 5:(-5,1600), 6: (10,1545)}\n"
] | [
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure"
]
] |
BinhMisfit/vietnamese-punctuation-prediction | [
"c9ae9f89fc61e47fce08238c6aad88e9e69e74c8"
] | [
"Vietnamese_newspapers/train_BiLSTM_CRF__focal_loss_model.py"
] | [
"from preprocessing import process_data\r\nfrom model import BiLSTM_model, batchnize_dataset, BiLSTM_CRF_model,BiLSTM_Attention_model\r\n# dataset path\r\nraw_path = 'dataset/Cleansed_data'\r\nsave_path = \"dataset/Encoded_data\"\r\n# embedding path\r\nWord2vec_path = \"embeddings\"\r\n\r\nchar_lowercase = True\r\n# dataset for train, validate and test\r\nvocab = \"dataset/Encoded_data/vocab.json\"\r\ntrain_set = \"dataset/Encoded_data/train.json\"\r\ndev_set = \"dataset/Encoded_data/dev.json\"\r\ntest_set = \"dataset/Encoded_data/test.json\"\r\nword_embedding = \"dataset/Encoded_data/word_emb.npz\"\r\n# network parameters\r\nnum_units = 300\r\nemb_dim = 300\r\nchar_emb_dim = 52\r\nfilter_sizes = [25, 25]\r\nchannel_sizes = [5, 5]\r\n# training parameters\r\nlr = 0.001\r\nlr_decay = 0.05\r\nminimal_lr = 1e-5\r\nkeep_prob = 0.5\r\nbatch_size = 32\r\nepochs = 30\r\nmax_to_keep = 1\r\nno_imprv_tolerance = 20\r\ncheckpoint_path = \"checkpoint_BiLSTM_CRF/\"\r\nsummary_path = \"checkpoint_BiLSTM_CRF/summary/\"\r\nmodel_name = \"punctuation_model\"\r\n\r\nconfig = {\"raw_path\": raw_path,\\\r\n \"save_path\": save_path,\\\r\n \"Word2vec_path\":Word2vec_path,\\\r\n \"char_lowercase\": char_lowercase,\\\r\n \"vocab\": vocab,\\\r\n \"train_set\": train_set,\\\r\n \"dev_set\": dev_set,\\\r\n \"test_set\": test_set,\\\r\n \"word_embedding\": word_embedding,\\\r\n \"num_units\": num_units,\\\r\n \"emb_dim\": emb_dim,\\\r\n \"char_emb_dim\": char_emb_dim,\\\r\n \"filter_sizes\": filter_sizes,\\\r\n \"channel_sizes\": channel_sizes,\\\r\n \"lr\": lr,\\\r\n \"lr_decay\": lr_decay,\\\r\n \"minimal_lr\": minimal_lr,\\\r\n \"keep_prob\": keep_prob,\\\r\n \"batch_size\": batch_size,\\\r\n \"epochs\": epochs,\\\r\n \"max_to_keep\": max_to_keep,\\\r\n \"no_imprv_tolerance\": no_imprv_tolerance,\\\r\n \"checkpoint_path\": checkpoint_path,\\\r\n \"summary_path\": summary_path,\\\r\n \"model_name\": model_name}\r\n\r\n# alpha & gamma for focal loss (tune hyperparameter)\r\nalpha = 0.25\r\ngamma = 0.1\r\nimport os\r\nif not os.path.exists(config[\"save_path\"]):\r\n os.mkdir(config[\"save_path\"])\r\n process_data(config)\r\n\r\nprint(\"Load datasets...\")\r\n# used for training\r\ntrain_set = batchnize_dataset(config[\"train_set\"], config[\"batch_size\"], shuffle=True)\r\n# used for computing validate loss\r\nvalid_set = batchnize_dataset(config[\"dev_set\"], batch_size=128, shuffle=False)\r\n\r\nimport tensorflow as tf\r\ntf.reset_default_graph()\r\nprint(\"Build models...\")\r\nmodel = BiLSTM_CRF_model(config, alpha, gamma)\r\nmodel.train(train_set, valid_set)\r\n# used for computing test precision, recall and F1 scores\r\ntest_set = batchnize_dataset(config[\"test_set\"], batch_size=128, shuffle=False)\r\n# run the session\r\nmodel.restore_last_session(checkpoint_path)\r\nmodel.test(test_set)"
] | [
[
"tensorflow.reset_default_graph"
]
] |
sk-ip/dffml | [
"1ef5a169327d71baecd5eccae83ad4a9999ccad1"
] | [
"model/xgboost/examples/iris_classification.py"
] | [
"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\nfrom dffml import Feature, Features\nfrom dffml.noasync import train, accuracy\nfrom dffml.accuracy import ClassificationAccuracy\nfrom dffml_model_xgboost.xgbclassifier import (\n XGBClassifierModel,\n XGBClassifierModelConfig,\n)\n\niris = load_iris()\ny = iris[\"target\"]\nX = iris[\"data\"]\ntrainX, testX, trainy, testy = train_test_split(\n X, y, test_size=0.1, random_state=123\n)\n\n# Configure the model\nmodel = XGBClassifierModel(\n XGBClassifierModelConfig(\n features=Features(Feature(\"data\", float,)),\n predict=Feature(\"target\", float, 1),\n location=\"model\",\n max_depth=3,\n learning_rate=0.01,\n n_estimators=200,\n reg_lambda=1,\n reg_alpha=0,\n gamma=0,\n colsample_bytree=0,\n subsample=1,\n )\n)\n\n# Train the model\ntrain(model, *[{\"data\": x, \"target\": y} for x, y in zip(trainX, trainy)])\n\n# Assess accuracy\nscorer = ClassificationAccuracy()\nprint(\n \"Test accuracy:\",\n accuracy(\n model,\n scorer,\n *[{\"data\": x, \"target\": y} for x, y in zip(testX, testy)],\n ),\n)\nprint(\n \"Training accuracy:\",\n accuracy(\n model,\n scorer,\n *[{\"data\": x, \"target\": y} for x, y in zip(trainX, trainy)],\n ),\n)\n"
] | [
[
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_iris"
]
] |
anshumandutt/AreCELearnedYet | [
"e2286c3621dea8e4961057b6197c1e14e75aea5a"
] | [
"lecarb/workload/workload.py"
] | [
"import csv\nfrom collections import OrderedDict\nfrom typing import Dict, NamedTuple, Optional, Tuple, List, Any\nimport pickle\nimport numpy as np\n\nfrom ..dtypes import is_categorical\nfrom ..constants import DATA_ROOT, PKL_PROTO\nfrom ..dataset.dataset import Table, load_table\n\nclass Query(NamedTuple):\n \"\"\"predicate of each attritbute are conjunctive\"\"\"\n predicates: Dict[str, Optional[Tuple[str, Any]]]\n ncols: int\n\nclass Label(NamedTuple):\n cardinality: int\n selectivity: float\n\ndef new_query(table: Table, ncols) -> Query:\n return Query(predicates=OrderedDict.fromkeys(table.data.columns, None),\n ncols=ncols)\n\ndef query_2_triple(query: Query, with_none: bool=True, split_range: bool=False\n ) -> Tuple[List[int], List[str], List[Any]]:\n \"\"\"return 3 lists with same length: cols(columns names), ops(predicate operators), vals(predicate literals)\"\"\"\n cols = []\n ops = []\n vals = []\n for c, p in query.predicates.items():\n if p is not None:\n if split_range is True and p[0] == '[]':\n cols.append(c)\n ops.append('>=')\n vals.append(p[1][0])\n cols.append(c)\n ops.append('<=')\n vals.append(p[1][1])\n else:\n cols.append(c)\n ops.append(p[0])\n vals.append(p[1])\n elif with_none:\n cols.append(c)\n ops.append(None)\n vals.append(None)\n return cols, ops, vals\n\ndef query_2_sql(query: Query, table: Table, aggregate=True, split=False, dbms='postgres'):\n preds = []\n for col, pred in query.predicates.items():\n if pred is None:\n continue\n op, val = pred\n if is_categorical(table.data[col].dtype):\n val = f\"\\'{val}\\'\" if not isinstance(val, tuple) else tuple(f\"\\'{v}\\'\" for v in val)\n if op == '[]':\n if split:\n preds.append(f\"{col} >= {val[0]}\")\n preds.append(f\"{col} <= {val[1]}\")\n else:\n preds.append(f\"({col} between {val[0]} and {val[1]})\")\n else:\n preds.append(f\"{col} {op} {val}\")\n\n if dbms == 'mysql':\n return f\"SELECT {'COUNT(*)' if aggregate else '*'} FROM `{table.name}` WHERE {' AND '.join(preds)}\"\n return f\"SELECT {'COUNT(*)' if aggregate else '*'} FROM \\\"{table.name}\\\" WHERE {' AND '.join(preds)}\"\n\ndef query_2_kde_sql(query: Query, table: Table):\n preds = []\n for col, pred in query.predicates.items():\n if pred is None:\n continue\n op, val = pred\n if is_categorical(table.data[col].dtype):\n assert op =='=' and not isinstance(val, tuple), val\n val = table.columns[col].discretize(val).item()\n if op == '[]':\n preds.append(f\"{col} >= {val[0]}\")\n preds.append(f\"{col} <= {val[1]}\")\n else:\n preds.append(f\"{col} {op} {val}\")\n\n return f\"SELECT * FROM \\\"{table.name}\\\" WHERE {' AND '.join(preds)}\"\n\ndef query_2_deepdb_sql(query: Query, table: Table, aggregate=True, split=False):\n preds = []\n for col, pred in query.predicates.items():\n if pred is None:\n continue\n op, val = pred\n if op == '[]':\n val = table.columns[col].normalize(list(val))\n assert len(val) == 2, val\n if split:\n preds.append(f\"{col} >= {val[0]}\")\n preds.append(f\"{col} <= {val[1]}\")\n else:\n preds.append(f\"({col} between {val[0]} and {val[1]})\")\n else:\n val = table.columns[col].normalize(val).item()\n preds.append(f\"{col} {op} {val}\")\n\n return f\"SELECT {'COUNT(*)' if aggregate else '*'} FROM \\\"{table.name}\\\" WHERE {' AND '.join(preds)}\"\n\ndef query_2_sqls(query: Query, table: Table):\n sqls = []\n for col, pred in query.predicates.items():\n if pred is None:\n continue\n op, val = pred\n if is_categorical(table.data[col].dtype):\n val = f\"\\'{val}\\'\" if not isinstance(val, tuple) else tuple(f\"\\'{v}\\'\" for 
v in val)\n\n if op == '[]':\n sqls.append(f\"SELECT * FROM \\\"{table.name}\\\" WHERE {col} between {val[0]} and {val[1]}\")\n else:\n sqls.append(f\"SELECT * FROM \\\"{table.name}\\\" WHERE {col} {op} {val}\")\n return sqls\n\n\ndef query_2_vector(query: Query, table: Table, upper: int=1):\n vec = []\n for col, pred in query.predicates.items():\n if pred is None:\n vec.extend([0.0, 1.0])\n continue\n op, val = pred\n if op == '[]':\n vec.extend([table.columns[col].normalize(val[0]).item(), table.columns[col].normalize(val[1]).item()])\n elif op == '>=':\n vec.extend([table.columns[col].normalize(val).item(), 1.0])\n elif op == '<=':\n vec.extend([0.0, table.columns[col].normalize(val).item()])\n elif op == '=':\n vec.extend([table.columns[col].normalize(val).item()] * 2)\n else:\n raise NotImplementedError\n return np.array(vec) * upper\n\ndef query_2_quicksel_vector(query: Query, table: Table, discrete_cols=set()):\n vec = []\n for col_name, pred in query.predicates.items():\n if pred is None:\n vec.extend([0.0, 1.0])\n continue\n op, val = pred\n col = table.columns[col_name]\n\n # adjust predicate to a proper range for discrete columns\n if col_name in discrete_cols:\n if is_categorical(col.dtype):\n val = col.discretize(val)\n minval = 0\n maxval = col.vocab_size\n vocab = np.arange(col.vocab_size)\n else: # integer values\n minval = col.minval\n maxval = col.maxval + 1\n vocab = col.vocab\n\n if op == '=':\n val = (val, val)\n elif op == '>=':\n val = (val, maxval)\n elif op == '<=':\n val = (minval, val)\n else:\n assert op == '[]'\n\n vocab = np.append(vocab, maxval)\n # argmax return 0 if no value in array satisfies\n val0 = vocab[np.argmax(vocab >= val[0])] if val[0] < maxval else maxval\n val1 = vocab[np.argmax(vocab > val[1])] if val[1] < maxval else maxval\n assert val0 <= val1, (val0, val1)\n assert val0 >= minval and val0 <= maxval, (val0, minval, maxval)\n assert val1 >= minval and val1 <= maxval, (val1, minval, maxval)\n # normalize to [0, 1]\n vec.extend([(val0-minval)/(maxval-minval), (val1-minval)/(maxval-minval)])\n\n # directly normalize continous columns\n else:\n if op == '>=':\n vec.extend([col.normalize(val).item(), 1.0])\n elif op == '<=':\n vec.extend([0.0, col.normalize(val).item()])\n elif op == '[]':\n vec.extend([col.normalize(val[0]).item(), col.normalize(val[1]).item()])\n else:\n raise NotImplementedError\n return np.array(vec)\n\n\ndef dump_queryset(dataset: str, name: str, queryset: Dict[str, List[Query]]) -> None:\n query_path = DATA_ROOT / dataset / \"workload\"\n query_path.mkdir(exist_ok=True)\n with open(query_path / f\"{name}.pkl\", 'wb') as f:\n pickle.dump(queryset, f, protocol=PKL_PROTO)\n\ndef load_queryset(dataset: str, name: str) -> Dict[str, List[Query]]:\n query_path = DATA_ROOT / dataset / \"workload\"\n with open(query_path / f\"{name}.pkl\", 'rb') as f:\n return pickle.load(f)\n\ndef dump_labels(dataset: str, version: str, name: str, labels: Dict[str, List[Label]]) -> None:\n label_path = DATA_ROOT / dataset / \"workload\"\n with open(label_path / f\"{name}-{version}-label.pkl\", 'wb') as f:\n pickle.dump(labels, f, protocol=PKL_PROTO)\n\ndef load_labels(dataset: str, version: str, name: str) -> Dict[str, List[Label]]:\n label_path = DATA_ROOT / dataset / \"workload\"\n with open(label_path / f\"{name}-{version}-label.pkl\", 'rb') as f:\n return pickle.load(f)\n\ndef dump_sqls(dataset: str, version: str, workload: str, group: str='test'):\n table = load_table(dataset, version)\n queryset = load_queryset(dataset, workload)\n labels = 
load_labels(dataset, version, workload)\n\n with open('test.csv', 'w') as f:\n writer = csv.writer(f)\n for query, label in zip(queryset[group], labels[group]):\n sql = query_2_sql(query, table, aggregate=False, dbms='sqlserver')\n writer.writerow([sql, label.cardinality])\n"
] | [
[
"numpy.array",
"numpy.arange",
"numpy.argmax",
"numpy.append"
]
] |
NikDemoShow/openvino | [
"31907e51e96f1603753dc69811bdf738374ca5e6"
] | [
"model-optimizer/unit_tests/extensions/middle/InsertSelect_test.py"
] | [
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.middle.InsertSelect import AddSelectBeforeMemoryNodePattern\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph\n\n\nclass InsertSelectTests(unittest.TestCase):\n\n # graph have no splices - selects should not be inserted\n def test_insert_select_0(self):\n graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'memory': {'kind': 'op', 'op': 'Assign'},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'memory')\n ],\n nodes_with_edges_only=True)\n ref_graph = graph.copy()\n AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, ref_graph, 'memory')\n self.assertTrue(flag, resp)\n\n # graph contains 1 splice with context length 5, should be inserted select with memory as counter with length 5\n def test_insert_select_1(self):\n graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'memory')\n ],\n nodes_with_edges_only=True)\n AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph)\n ref_graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n\n 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data_mem_1': {'kind': 'data'},\n 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_1': {'kind': 'data'},\n 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data': {'kind': 'data'},\n 'broadcast_mem_1': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_1': {'kind': 'data'},\n\n 'shape': {'kind': 'op', 'op': 'ShapeOf'},\n 'shape_data': {'kind': 'data'},\n 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])},\n 'crop_batch_data': {'kind': 'data'},\n 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])},\n 'crop_batch_dim_data': {'kind': 'data'},\n 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data': {'kind': 'data'},\n 'gather_shape': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data': {'kind': 'data'},\n 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones': {'kind': 'data'},\n 'broadcast': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data': {'kind': 'data'},\n\n 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])},\n 'second_dim_data_mem_2': 
{'kind': 'data'},\n 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_2': {'kind': 'data'},\n 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones_2': {'kind': 'data'},\n 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_2': {'kind': 'data'},\n\n 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': int64_array([5])},\n 'memory_in_data': {'kind': 'data'},\n 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])},\n 'memory_out_data': {'kind': 'data'},\n 'result': {'kind': 'op', 'op': 'Result'},\n 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4},\n 'crop_in_data': {'kind': 'data'},\n 'crop_out': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1},\n 'crop_out_data': {'kind': 'data'},\n 'equal': {'kind': 'op', 'op': 'Equal'},\n 'equal_data': {'kind': 'data'},\n 'select': {'kind': 'op', 'op': 'Select'},\n 'select_out_data': {'kind': 'data', 'shape': [1, 26]},\n 'const_0': {'kind': 'op', 'op': 'Const'},\n 'const_0_data': {'kind': 'data'},\n 'concat': {'kind': 'op', 'op': 'Concat'},\n 'concat_data': {'kind': 'data'},\n\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign'},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'select', {'in': 1}),\n\n ('second_dim_mem_1', 'second_dim_data_mem_1'),\n ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}),\n ('gather_shape_mem_1', 'gather_shape_data_mem_1'),\n ('fill_value', 'fill_value_data'),\n ('fill_value_data', 'broadcast_mem_1', {'in': 0}),\n ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}),\n ('broadcast_mem_1', 'broadcast_data_mem_1'),\n ('broadcast_data_mem_1', 'memory_in'),\n\n ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'),\n ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}),\n\n ('second_dim_mem_2', 'second_dim_data_mem_2'),\n ('second_dim_data_mem_2', 'gather_shape_mem_2', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}),\n ('gather_shape_mem_2', 'gather_shape_data_mem_2'),\n ('fill_value_ones_2', 'fill_value_data_ones_2'),\n ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}),\n ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}),\n ('broadcast_mem_2', 'broadcast_data_mem_2'),\n ('broadcast_data_mem_2', 'concat', {'in': 1}),\n\n ('concat', 'concat_data'), ('concat_data', 'memory_out'),\n ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'),\n ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),\n ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}),\n ('equal', 'equal_data'),\n ('equal_data', 'select', {'in': 0}),\n\n ('placeholder_data_2', 'shape'), ('shape', 'shape_data'),\n ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'),\n ('crop_batch_dim', 'crop_batch_dim_data'),\n ('crop_batch_dim_data', 'crop_batch', {'in': 1}),\n ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}),\n ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'),\n ('fill_value_ones', 'fill_value_data_ones'),\n ('fill_value_data_ones', 'broadcast', {'in': 0}),\n ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'),\n 
('broadcast_data', 'select', {'in': 2}),\n\n ('select', 'select_out_data'),\n ('select_out_data', 'memory')\n ],\n nodes_with_edges_only=True\n )\n\n (flag, resp) = compare_graphs(graph, ref_graph, 'memory')\n self.assertTrue(flag, resp)\n\n # graph contains 1 splice with context length 5 on the path to memory and 1 out of path,\n # should be inserted select with memory as counter with length 5\n def test_insert_select_2(self):\n graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 65]},\n 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])},\n 'splice_data_2': {'kind': 'data', 'shape': [1, 39]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign'},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('placeholder_data_1', 'splice_2'), ('splice_2', 'splice_data_2'),\n ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'memory')\n ],\n nodes_with_edges_only=True)\n AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph)\n ref_graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 65]},\n 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])},\n 'splice_data_2': {'kind': 'data', 'shape': [1, 39]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n\n 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data_mem_1': {'kind': 'data'},\n 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_1': {'kind': 'data'},\n 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data': {'kind': 'data'},\n 'broadcast_mem_1': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_1': {'kind': 'data'},\n\n 'shape': {'kind': 'op', 'op': 'ShapeOf'},\n 'shape_data': {'kind': 'data'},\n 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])},\n 'crop_batch_data': {'kind': 'data'},\n 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])},\n 'crop_batch_dim_data': {'kind': 'data'},\n 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data': {'kind': 'data'},\n 'gather_shape': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data': {'kind': 'data'},\n 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones': {'kind': 'data'},\n 'broadcast': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data': {'kind': 'data'},\n\n 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])},\n 'second_dim_data_mem_2': {'kind': 'data'},\n 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_2': {'kind': 'data'},\n 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones_2': {'kind': 'data'},\n 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_2': {'kind': 'data'},\n\n 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': 
int64_array([5])},\n 'memory_in_data': {'kind': 'data'},\n 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])},\n 'memory_out_data': {'kind': 'data'},\n 'result': {'kind': 'op', 'op': 'Result'},\n 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4},\n 'crop_in_data': {'kind': 'data'},\n 'crop_out': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1},\n 'crop_out_data': {'kind': 'data'},\n 'equal': {'kind': 'op', 'op': 'Equal'},\n 'equal_data': {'kind': 'data'},\n 'select': {'kind': 'op', 'op': 'Select'},\n 'select_out_data': {'kind': 'data', 'shape': [1, 26]},\n 'const_0': {'kind': 'op', 'op': 'Const'},\n 'const_0_data': {'kind': 'data'},\n 'concat': {'kind': 'op', 'op': 'Concat'},\n 'concat_data': {'kind': 'data'},\n\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign'},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('placeholder_data_1', 'splice_2'), ('splice_2', 'splice_data_2'),\n ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'select', {'in': 1}),\n\n ('second_dim_mem_1', 'second_dim_data_mem_1'),\n ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}),\n ('gather_shape_mem_1', 'gather_shape_data_mem_1'),\n ('fill_value', 'fill_value_data'),\n ('fill_value_data', 'broadcast_mem_1', {'in': 0}),\n ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}),\n ('broadcast_mem_1', 'broadcast_data_mem_1'),\n ('broadcast_data_mem_1', 'memory_in'),\n\n ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'),\n ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}),\n\n ('second_dim_mem_2', 'second_dim_data_mem_2'),\n ('second_dim_data_mem_2', 'gather_shape_mem_2', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}),\n ('gather_shape_mem_2', 'gather_shape_data_mem_2'),\n ('fill_value_ones_2', 'fill_value_data_ones_2'),\n ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}),\n ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}),\n ('broadcast_mem_2', 'broadcast_data_mem_2'),\n ('broadcast_data_mem_2', 'concat', {'in': 1}),\n\n ('concat', 'concat_data'), ('concat_data', 'memory_out'),\n ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'),\n ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),\n ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}),\n ('equal', 'equal_data'),\n ('equal_data', 'select', {'in': 0}),\n\n ('placeholder_data_2', 'shape'), ('shape', 'shape_data'),\n ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'),\n ('crop_batch_dim', 'crop_batch_dim_data'),\n ('crop_batch_dim_data', 'crop_batch', {'in': 1}),\n ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}),\n ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'),\n ('fill_value_ones', 'fill_value_data_ones'),\n ('fill_value_data_ones', 'broadcast', {'in': 0}),\n ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'),\n ('broadcast_data', 'select', {'in': 2}),\n\n ('select', 'select_out_data'),\n ('select_out_data', 'memory')\n ],\n nodes_with_edges_only=True\n )\n (flag, resp) = compare_graphs(graph, ref_graph, 'memory')\n self.assertTrue(flag, resp)\n\n # graph contains 2 splices with sum context length 8 on the path to memory,\n # should be inserted 
select with memory as counter with length 7\n def test_insert_select_3(self):\n graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 65]},\n 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])},\n 'splice_data_2': {'kind': 'data', 'shape': [1, 39]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('splice_data_1', 'splice_2'), ('splice_2', 'splice_data_2'),\n ('splice_data_2', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'memory')\n ],\n nodes_with_edges_only=True)\n AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph)\n ref_graph = build_graph({\n 'placeholder_1': {'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]},\n 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])},\n 'splice_data_1': {'kind': 'data', 'shape': [1, 65]},\n 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])},\n 'splice_data_2': {'kind': 'data', 'shape': [1, 39]},\n 'placeholder_2': {'kind': 'op', 'op': None},\n\n 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data_mem_1': {'kind': 'data'},\n 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_1': {'kind': 'data'},\n 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data': {'kind': 'data'},\n 'broadcast_mem_1': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_1': {'kind': 'data'},\n\n 'shape': {'kind': 'op', 'op': 'ShapeOf'},\n 'shape_data': {'kind': 'data'},\n 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])},\n 'crop_batch_data': {'kind': 'data'},\n 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])},\n 'crop_batch_dim_data': {'kind': 'data'},\n 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])},\n 'second_dim_data': {'kind': 'data'},\n 'gather_shape': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data': {'kind': 'data'},\n 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones': {'kind': 'data'},\n 'broadcast': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data': {'kind': 'data'},\n\n 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])},\n 'second_dim_data_mem_2': {'kind': 'data'},\n 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'},\n 'gather_shape_data_mem_2': {'kind': 'data'},\n 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])},\n 'fill_value_data_ones_2': {'kind': 'data'},\n 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'},\n 'broadcast_data_mem_2': {'kind': 'data'},\n\n 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': int64_array([5])},\n 'memory_in_data': {'kind': 'data'},\n 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])},\n 'memory_out_data': {'kind': 'data'},\n 'result': {'kind': 'op', 'op': 'Result'},\n 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4},\n 'crop_in_data': {'kind': 'data'},\n 'crop_out': 
{'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1},\n 'crop_out_data': {'kind': 'data'},\n 'equal': {'kind': 'op', 'op': 'Equal'},\n 'equal_data': {'kind': 'data'},\n 'select': {'kind': 'op', 'op': 'Select'},\n 'select_out_data': {'kind': 'data', 'shape': [1, 26]},\n 'const_0': {'kind': 'op', 'op': 'Const'},\n 'const_0_data': {'kind': 'data'},\n 'concat': {'kind': 'op', 'op': 'Concat'},\n 'concat_data': {'kind': 'data'},\n\n 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]},\n 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0},\n },\n [('placeholder_1', 'placeholder_data_1'),\n ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'),\n ('splice_data_1', 'splice_2'), ('splice_2', 'splice_data_2'),\n ('splice_data_2', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'),\n ('placeholder_data_2', 'select', {'in': 1}),\n\n ('second_dim_mem_1', 'second_dim_data_mem_1'),\n ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}),\n ('gather_shape_mem_1', 'gather_shape_data_mem_1'),\n ('fill_value', 'fill_value_data'),\n ('fill_value_data', 'broadcast_mem_1', {'in': 0}),\n ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}),\n ('broadcast_mem_1', 'broadcast_data_mem_1'),\n ('broadcast_data_mem_1', 'memory_in'),\n\n ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'),\n ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}),\n\n ('second_dim_mem_2', 'second_dim_data_mem_2'),\n ('second_dim_data_mem_2', 'gather_shape_mem_2', {'in': 1}),\n ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}),\n ('gather_shape_mem_2', 'gather_shape_data_mem_2'),\n ('fill_value_ones_2', 'fill_value_data_ones_2'),\n ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}),\n ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}),\n ('broadcast_mem_2', 'broadcast_data_mem_2'),\n ('broadcast_data_mem_2', 'concat', {'in': 1}),\n\n ('concat', 'concat_data'), ('concat_data', 'memory_out'),\n ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'),\n ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),\n ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}),\n ('equal', 'equal_data'),\n ('equal_data', 'select', {'in': 0}),\n\n ('placeholder_data_2', 'shape'), ('shape', 'shape_data'),\n ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'),\n ('crop_batch_dim', 'crop_batch_dim_data'),\n ('crop_batch_dim_data', 'crop_batch', {'in': 1}),\n ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}),\n ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'),\n ('fill_value_ones', 'fill_value_data_ones'),\n ('fill_value_data_ones', 'broadcast', {'in': 0}),\n ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'),\n ('broadcast_data', 'select', {'in': 2}),\n\n ('select', 'select_out_data'),\n ('select_out_data', 'memory')\n ],\n nodes_with_edges_only=True\n )\n\n (flag, resp) = compare_graphs(graph, ref_graph, 'memory')\n self.assertTrue(flag, resp)\n"
] | [
[
"numpy.array"
]
] |
ZhaoJackie/fix-yahoo-finance | [
"d21b9c984572c514d73207781ecb855e49085e5b"
] | [
"fix_yahoo_finance/__init__.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/fix-yahoo-finance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\n__version__ = \"0.1.31\"\n__author__ = \"Ran Aroussi\"\n__all__ = ['download', 'Ticker', 'pdr_override',\n 'get_yahoo_crumb', 'parse_ticker_csv']\n\nimport time as _time\nimport datetime as _datetime\nimport requests as _requests\nimport multitasking as _multitasking\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\n\n_DFS = {}\n_PROGRESS_BAR = None\n\n\ndef parse_ticker_csv(csv_str, auto_adjust):\n raise DeprecationWarning('This method is deprecated')\n pass\n\n\ndef get_yahoo_crumb(force=False):\n raise DeprecationWarning('This method is deprecated')\n pass\n\n\ndef Tickers(tickers):\n tickers = tickers if isinstance(tickers, list) else tickers.split()\n ticker_objects = {}\n\n for ticker in tickers:\n ticker_objects[ticker] = Ticker(ticker)\n return ticker_objects\n\n\nclass Ticker():\n\n def __repr__(self):\n return 'Ticker object <%s>' % self.ticker\n\n def __init__(self, ticker):\n self.ticker = ticker.upper()\n self._history = None\n self._base_url = 'https://query1.finance.yahoo.com'\n\n @property\n def info(self):\n \"\"\" retreive metadata and currenct price data \"\"\"\n url = \"{}/v7/finance/quote?symbols={}\".format(\n self._base_url, self.ticker)\n r = _requests.get(url=url).json()[\"quoteResponse\"][\"result\"]\n if len(r) > 0:\n return r[0]\n return {}\n\n \"\"\"\n # @todo\n def _options(self):\n # https://query1.finance.yahoo.com/v7/finance/options/SPY\n pass\n \"\"\"\n\n @staticmethod\n def _auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df\n\n @staticmethod\n def _parse_quotes(data):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n return quotes\n\n @staticmethod\n def _parse_actions(data):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock 
Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(\n splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n @property\n def dividends(self):\n if self._history is None:\n self._history = self.history(period=\"max\")\n dividends = self._history[\"Dividends\"]\n return dividends[dividends != 0]\n\n @property\n def splits(self):\n if self._history is None:\n self.history(period=\"max\")\n splits = self._history[\"Stock Splits\"]\n return splits[splits != 0]\n\n @property\n def actions(self):\n if self._history is None:\n self.history(period=\"max\")\n actions = self._history[[\"Dividends\", \"Stock Splits\"]]\n return actions[actions != 0].dropna(how='all').fillna(0)\n\n def history(self, period=\"1mo\", interval=\"1d\",\n start=None, end=None, prepost=False,\n actions=True, auto_adjust=True):\n \"\"\"\n :Parameters:\n period : str\n Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\n Either Use period parameter or use start and end\n interval : str\n Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n Intraday data cannot extend last 60 days\n start: str\n Download start date string (YYYY-MM-DD) or _datetime.\n Default is 1900-01-01\n end: str\n Download end date string (YYYY-MM-DD) or _datetime.\n Default is now\n prepost : bool\n Include Pre and Post market data in results?\n Default is False\n auto_adjust: bool\n Adjust all OHLC automatically? Default is True\n \"\"\"\n\n if period is None or period.lower() == \"max\":\n if start is None:\n start = -2208988800\n elif isinstance(start, _datetime.datetime):\n start = int(_time.mktime(start.timetuple()))\n else:\n start = int(_time.mktime(\n _time.strptime(str(start), '%Y-%m-%d')))\n if end is None:\n end = int(_time.time())\n elif isinstance(end, _datetime.datetime):\n end = int(_time.mktime(end.timetuple()))\n else:\n end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))\n\n params = {\"period1\": start, \"period2\": end}\n else:\n period = period.lower()\n params = {\"range\": period}\n\n params[\"interval\"] = interval.lower()\n params[\"includePrePost\"] = prepost\n params[\"events\"] = \"div,splits\"\n\n # 1) fix weired bug with Yahoo! - returning 60m for 30m bars\n if params[\"interval\"] == \"30m\":\n params[\"interval\"] = \"15m\"\n\n url = \"{}/v8/finance/chart/{}\".format(self._base_url, self.ticker)\n data = _requests.get(url=url, params=params).json()\n\n # Getting data from json\n error = data[\"chart\"][\"error\"]\n if error:\n raise ValueError(self.ticker, error[\"description\"])\n\n # quotes\n quotes = self._parse_quotes(data[\"chart\"][\"result\"][0])\n\n # 2) fix weired bug with Yahoo! 
- returning 60m for 30m bars\n if interval.lower() == \"30m\":\n quotes2 = quotes.resample('30T')\n quotes = _pd.DataFrame(index=quotes2.last().index, data={\n 'Open': quotes2['Open'].first(),\n 'High': quotes2['High'].max(),\n 'Low': quotes2['Low'].min(),\n 'Close': quotes2['Close'].last(),\n 'Adj Close': quotes2['Adj Close'].last(),\n 'Volume': quotes2['Volume'].sum()\n })\n try:\n quotes['Dividends'] = quotes2['Dividends'].max()\n except Exception:\n pass\n try:\n quotes['Stock Splits'] = quotes2['Dividends'].max()\n except Exception:\n pass\n\n if auto_adjust:\n quotes = self._auto_adjust(quotes)\n\n quotes = _np.round(quotes, data[\n \"chart\"][\"result\"][0][\"meta\"][\"priceHint\"])\n quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)\n\n quotes.dropna(inplace=True)\n\n # actions\n dividends, splits = self._parse_actions(data[\"chart\"][\"result\"][0])\n\n # combine\n df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)\n df[\"Dividends\"].fillna(0, inplace=True)\n df[\"Stock Splits\"].fillna(0, inplace=True)\n\n # index eod/intraday\n df.index = df.index.tz_localize(\"UTC\").tz_convert(\n data[\"chart\"][\"result\"][0][\"meta\"][\"exchangeTimezoneName\"])\n\n if params[\"interval\"][-1] == \"m\":\n df.index.name = \"Datetime\"\n else:\n df.index = _pd.to_datetime(df.index.date)\n df.index.name = \"Date\"\n\n self._history = df.copy()\n\n if not actions:\n df.drop(columns=[\"Dividends\", \"Stock Splits\"], inplace=True)\n\n return df\n\n\n@_multitasking.task\ndef _download_one_threaded(ticker, start=None, end=None, auto_adjust=False,\n actions=False, progress=True, period=\"max\",\n interval=\"1d\", prepost=False):\n\n global _PROGRESS_BAR, _DFS\n data = _download_one(ticker, start, end, auto_adjust, actions,\n period, interval, prepost)\n _DFS[ticker.upper()] = data\n if progress:\n _PROGRESS_BAR.animate()\n\n\ndef _download_one(ticker, start=None, end=None, auto_adjust=False,\n actions=False, period=\"max\", interval=\"1d\", prepost=False):\n\n return Ticker(ticker).history(period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust)\n\n\ndef download(tickers, start=None, end=None, actions=False, threads=False,\n group_by='column', auto_adjust=False, progress=True,\n period=\"max\", interval=\"1d\", prepost=False, **kwargs):\n \"\"\"Download yahoo tickers\n :Parameters:\n tickers : str, list\n List of tickers to download\n period : str\n Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\n Either Use period parameter or use start and end\n interval : str\n Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n Intraday data cannot extend last 60 days\n start: str\n Download start date string (YYYY-MM-DD) or _datetime.\n Default is 1900-01-01\n end: str\n Download end date string (YYYY-MM-DD) or _datetime.\n Default is now\n group_by : str\n Group by 'ticker' or 'column' (default)\n prepost : bool\n Include Pre and Post market data in results?\n Default is False\n auto_adjust: bool\n Adjust all OHLC automatically? Default is False\n actions: bool\n Download dividend + stock splits data. Default is False\n threads: bool / int\n How many threads to use for mass downloading. 
Default is False\n \"\"\"\n global _PROGRESS_BAR, _DFS\n\n # create ticker list\n tickers = tickers if isinstance(tickers, list) else tickers.split()\n\n if progress:\n _PROGRESS_BAR = _ProgressBar(len(tickers), 'downloaded')\n\n # reset _DFS\n _DFS = {}\n\n # set thread count if True\n if threads is True:\n threads = min([len(tickers), _multitasking.cpu_count()])\n\n # download using threads\n if isinstance(threads, int):\n _multitasking.set_max_threads(threads)\n for i, ticker in enumerate(tickers):\n _download_one_threaded(ticker, period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust,\n progress=(progress and i > 0))\n while len(_DFS) < len(tickers):\n _time.sleep(0.01)\n\n # download synchronously\n else:\n for i, ticker in enumerate(tickers):\n data = _download_one(ticker, period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust)\n _DFS[ticker.upper()] = data\n if progress:\n _PROGRESS_BAR.animate()\n\n if progress:\n _PROGRESS_BAR.completed()\n\n data = _pd.concat(_DFS.values(), axis=1, keys=_DFS.keys())\n if group_by == 'column':\n data.columns = data.columns.swaplevel(0, 1)\n data.sort_index(level=0, axis=1, inplace=True)\n\n if len(tickers) == 1:\n data = _DFS[tickers[0]]\n return data\n\n\nclass _ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# make pandas datareader optional\n# otherwise can be called via fix_yahoo_finance.download(...)\ndef pdr_override():\n try:\n import pandas_datareader\n pandas_datareader.data.get_data_yahoo = download\n pandas_datareader.data.get_data_yahoo_actions = download\n except Exception:\n pass\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.concat",
"numpy.round"
]
] |
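For orientation, the `download` entry point defined in the file above can be exercised like this (a minimal sketch; it assumes the package is importable as `fix_yahoo_finance` and that Yahoo!'s endpoints are reachable):

    import fix_yahoo_finance as yf

    # single ticker, full history, keeping the Dividends / Stock Splits columns
    spy = yf.download("SPY", period="max", actions=True)

    # several tickers in one call; group_by="ticker" nests columns per symbol
    data = yf.download("SPY AAPL", period="1y", interval="1d",
                       group_by="ticker", threads=True)
    print(data["AAPL"].tail())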
phernst/TIGRE | [
"220935ae7a6a002d64ea95ed8b6f69baae2f9d21"
] | [
"Python/setup.py"
] | [
"import copy\r\nimport glob\r\nimport os\r\nfrom os.path import join as pjoin\r\nimport re\r\nimport subprocess\r\nimport sys\r\n\r\nfrom Cython.Distutils import build_ext\r\nimport numpy\r\nfrom setuptools import setup, find_packages, Extension\r\n\r\n\r\nIS_WINDOWS = sys.platform == 'win32'\r\n\r\n\r\n# Code from https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py\r\nCOMPUTE_CAPABILITY_ARGS = [ # '-gencode=arch=compute_20,code=sm_20', #deprecated\r\n #'-gencode=arch=compute_30,code=sm_30',#deprecated\r\n '-gencode=arch=compute_37,code=sm_37',\r\n '-gencode=arch=compute_52,code=sm_52',\r\n '-gencode=arch=compute_60,code=sm_60',\r\n '-gencode=arch=compute_61,code=sm_61',\r\n '-gencode=arch=compute_70,code=sm_70',\r\n '-gencode=arch=compute_75,code=sm_75',\r\n '--ptxas-options=-v', '-c',\r\n '--default-stream=per-thread',\r\n ]\r\n\r\n\r\ndef locate_cuda():\r\n \"\"\"Locate the CUDA environment on the system\r\n\r\n Returns a dict with keys 'home', 'include', and 'lib64'\r\n and values giving the absolute path to each directory.\r\n\r\n Starts by looking for the CUDA_HOME or CUDA_PATH env variable. If not found, everything\r\n is based on finding 'nvcc' in the PATH.\r\n \"\"\"\r\n # Guess #1\r\n cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')\r\n if cuda_home is None:\r\n # Guess #2\r\n try:\r\n which = 'where' if IS_WINDOWS else 'which'\r\n nvcc = subprocess.check_output(\r\n [which, 'nvcc']).decode().rstrip('\\r\\n')\r\n cuda_home = os.path.dirname(os.path.dirname(nvcc))\r\n except subprocess.CalledProcessError:\r\n # Guess #3\r\n if IS_WINDOWS:\r\n cuda_homes = glob.glob(\r\n 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')\r\n if len(cuda_homes) == 0:\r\n cuda_home = ''\r\n else:\r\n cuda_home = cuda_homes[0]\r\n else:\r\n cuda_home = '/usr/local/cuda'\r\n if not os.path.exists(cuda_home):\r\n cuda_home = None\r\n\r\n cudaconfig = {'home': cuda_home,\r\n 'include': pjoin(cuda_home, 'include'),\r\n 'lib64': pjoin(cuda_home, pjoin('lib', 'x64') if IS_WINDOWS else 'lib64')}\r\n if not all([os.path.exists(v) for v in cudaconfig.values()]):\r\n raise EnvironmentError(\r\n 'The CUDA path could not be located in $PATH, $CUDA_HOME or $CUDA_PATH. '\r\n 'Either add it to your path, or set $CUDA_HOME or $CUDA_PATH.')\r\n\r\n return cudaconfig\r\n\r\n\r\nCUDA = locate_cuda()\r\n\r\n\r\n# Obtain the numpy include directory. This logic works across numpy versions.\r\ntry:\r\n NUMPY_INCLUDE = numpy.get_include()\r\nexcept AttributeError:\r\n NUMPY_INCLUDE = numpy.get_numpy_include()\r\n\r\n\r\ndef _is_cuda_file(path):\r\n return os.path.splitext(path)[1] in ['.cu', '.cuh']\r\n\r\n\r\nCOMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/EHsc']\r\n\r\n\r\nCOMMON_NVCC_FLAGS = [\r\n '-D__CUDA_NO_HALF_OPERATORS__',\r\n '-D__CUDA_NO_HALF_CONVERSIONS__',\r\n '-D__CUDA_NO_HALF2_OPERATORS__',\r\n '--expt-relaxed-constexpr'\r\n]\r\n\r\n\r\ndef _join_cuda_home(*paths):\r\n return os.path.join(CUDA['home'], *paths)\r\n\r\n\r\nclass BuildExtension(build_ext):\r\n '''\r\n A custom :mod:`Cython.Distutils` build extension .\r\n\r\n This :class:`Cython.Distutils.build_ext` subclass takes care of passing the\r\n minimum required compiler flags (e.g. 
``-std=c++11``) as well as mixed\r\n C++/CUDA compilation (and support for CUDA files in general).\r\n\r\n When using :class:`BuildExtension`, it is allowed to supply a dictionary\r\n for ``extra_compile_args`` (rather than the usual list) that maps from\r\n languages (``cxx`` or ``nvcc``) to a list of additional compiler flags to\r\n supply to the compiler. This makes it possible to supply different flags to\r\n the C++ and CUDA compiler during mixed compilation.\r\n '''\r\n\r\n @classmethod\r\n def with_options(cls, **options):\r\n '''\r\n Returns an alternative constructor that extends any original keyword\r\n arguments to the original constructor with the given options.\r\n '''\r\n def init_with_options(*args, **kwargs):\r\n kwargs = kwargs.copy()\r\n kwargs.update(options)\r\n return cls(*args, **kwargs)\r\n return init_with_options\r\n\r\n def __init__(self, *args, **kwargs):\r\n build_ext.__init__(self, *args, **kwargs)\r\n self.no_python_abi_suffix = kwargs.get(\"no_python_abi_suffix\", False)\r\n\r\n def build_extensions(self):\r\n # Register .cu and .cuh as valid source extensions.\r\n self.compiler.src_extensions += ['.cu', '.cuh']\r\n # Save the original _compile method for later.\r\n if self.compiler.compiler_type == 'msvc':\r\n self.compiler._cpp_extensions += ['.cu', '.cuh']\r\n original_compile = self.compiler.compile\r\n original_spawn = self.compiler.spawn\r\n else:\r\n original_compile = self.compiler._compile\r\n\r\n def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\r\n # Copy before we make any modifications.\r\n cflags = copy.deepcopy(extra_postargs)\r\n try:\r\n original_compiler = self.compiler.compiler_so\r\n if _is_cuda_file(src):\r\n nvcc = _join_cuda_home('bin', 'nvcc')\r\n if not isinstance(nvcc, list):\r\n nvcc = [nvcc]\r\n self.compiler.set_executable('compiler_so', nvcc)\r\n if isinstance(cflags, dict):\r\n cflags = cflags['nvcc']\r\n cflags = COMMON_NVCC_FLAGS + ['--compiler-options',\r\n \"'-fPIC'\"] + cflags + COMPUTE_CAPABILITY_ARGS\r\n elif isinstance(cflags, dict):\r\n cflags = cflags['cxx']\r\n # NVCC does not allow multiple -std to be passed, so we avoid\r\n # overriding the option if the user explicitly passed it.\r\n if not any(flag.startswith('-std=') for flag in cflags):\r\n cflags.append('-std=c++11')\r\n\r\n original_compile(obj, src, ext, cc_args, cflags, pp_opts)\r\n finally:\r\n # Put the original compiler back in place.\r\n self.compiler.set_executable('compiler_so', original_compiler)\r\n\r\n def win_wrap_compile(sources,\r\n output_dir=None,\r\n macros=None,\r\n include_dirs=None,\r\n debug=0,\r\n extra_preargs=None,\r\n extra_postargs=None,\r\n depends=None):\r\n\r\n cflags = copy.deepcopy(extra_postargs)\r\n extra_postargs = None\r\n\r\n def spawn(cmd, cflags):\r\n # Using regex to match src, obj and include files\r\n src_regex = re.compile('/T(p|c)(.*)')\r\n src_list = [\r\n m.group(2) for m in (src_regex.match(elem) for elem in cmd)\r\n if m\r\n ]\r\n\r\n obj_regex = re.compile('/Fo(.*)')\r\n obj_list = [\r\n m.group(1) for m in (obj_regex.match(elem) for elem in cmd)\r\n if m\r\n ]\r\n\r\n include_regex = re.compile(r'((\\-|\\/)I.*)')\r\n include_list = [\r\n m.group(1)\r\n for m in (include_regex.match(elem) for elem in cmd) if m\r\n ]\r\n\r\n if len(src_list) >= 1 and len(obj_list) >= 1:\r\n src = src_list[0]\r\n obj = obj_list[0]\r\n if _is_cuda_file(src):\r\n nvcc = _join_cuda_home('bin', 'nvcc')\r\n if isinstance(cflags, dict):\r\n cflags = cflags['nvcc']\r\n elif not isinstance(cflags, list):\r\n cflags = 
[]\r\n\r\n cflags = COMMON_NVCC_FLAGS + cflags + COMPUTE_CAPABILITY_ARGS\r\n for flag in COMMON_MSVC_FLAGS:\r\n cflags = ['-Xcompiler', flag] + cflags\r\n for macro in macros:\r\n if len(macro) == 2:\r\n if macro[1]==None:\r\n cflags += ['--define-macro', macro[0]]\r\n else:\r\n cflags += ['--define-macro', \"{}={}\".format(macro[0], macro[1])]\r\n elif len(macro) == 1:\r\n cflags += ['--undefine-macro', macro[0]]\r\n \r\n cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags\r\n elif isinstance(cflags, dict):\r\n cflags = COMMON_MSVC_FLAGS #+ self.cflags['cxx']\r\n cmd += cflags\r\n elif isinstance(cflags, list):\r\n cflags = COMMON_MSVC_FLAGS + cflags\r\n cmd += cflags\r\n\r\n return original_spawn(cmd)\r\n\r\n try:\r\n self.compiler.spawn = lambda cmd: spawn(cmd, cflags)\r\n return original_compile(sources, output_dir, macros,\r\n include_dirs, debug, extra_preargs,\r\n extra_postargs, depends)\r\n finally:\r\n self.compiler.spawn = original_spawn\r\n\r\n # Monkey-patch the _compile method.\r\n if self.compiler.compiler_type == 'msvc':\r\n self.compiler.compile = win_wrap_compile\r\n else:\r\n self.compiler._compile = unix_wrap_compile\r\n\r\n build_ext.build_extensions(self)\r\n\r\n def get_ext_filename(self, ext_name):\r\n # Get the original shared library name. For Python 3, this name will be\r\n # suffixed with \"<SOABI>.so\", where <SOABI> will be something like\r\n # cpython-37m-x86_64-linux-gnu. On Python 2, there is no such ABI name.\r\n # The final extension, .so, would be .lib/.dll on Windows of course.\r\n ext_filename = build_ext.get_ext_filename(self, ext_name)\r\n # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI\r\n # component. This makes building shared libraries with setuptools that\r\n # aren't Python modules nicer.\r\n if self.no_python_abi_suffix and sys.version_info >= (3, 0):\r\n # The parts will be e.g. 
[\"my_extension\", \"cpython-37m-x86_64-linux-gnu\", \"so\"].\r\n ext_filename_parts = ext_filename.split('.')\r\n # Omit the second to last element.\r\n without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]\r\n ext_filename = '.'.join(without_abi)\r\n return ext_filename\r\n\r\n\r\ndef include_headers(filename_list, sdist=False):\r\n \"\"\"add hpp and h files to list if sdist is called\"\"\"\r\n if not sdist:\r\n return filename_list\r\n\r\n c_extensions = ['.cu', \".c\", \".C\", \".cc\", \".cpp\", \".cxx\", \".c++\"]\r\n header_list = []\r\n for filename in filename_list:\r\n header = list(os.path.splitext(filename))\r\n if header[1] in c_extensions:\r\n header[1] = '.hpp'\r\n header_list.append(''.join(header))\r\n\r\n filename_list += ['tigre/Source/types_TIGRE.hpp', 'tigre/Source/errors.hpp']\r\n return filename_list + header_list\r\n\r\n\r\nAx_ext = Extension('_Ax',\r\n sources=include_headers(['tigre/Source/projection.cpp',\r\n 'tigre/Source/TIGRE_common.cpp',\r\n 'tigre/Source/Siddon_projection.cu',\r\n 'tigre/Source/Siddon_projection_parallel.cu',\r\n 'tigre/Source/ray_interpolated_projection.cu',\r\n 'tigre/Source/ray_interpolated_projection_parallel.cu',\r\n 'tigre/Source/_types.pxd',\r\n 'tigre/Source/_Ax.pyx'],\r\n sdist=sys.argv[1] == \"sdist\"),\r\n define_macros=[('IS_FOR_PYTIGRE', None)],\r\n library_dirs=[CUDA['lib64']],\r\n libraries=['cudart'],\r\n language='c++',\r\n runtime_library_dirs=[CUDA['lib64']] if not IS_WINDOWS else None,\r\n include_dirs=[NUMPY_INCLUDE, CUDA['include'], 'Source'])\r\n\r\n\r\nAtb_ext = Extension('_Atb',\r\n sources=include_headers(['tigre/Source/TIGRE_common.cpp',\r\n 'tigre/Source/voxel_backprojection.cu',\r\n 'tigre/Source/voxel_backprojection2.cu',\r\n 'tigre/Source/voxel_backprojection_parallel.cu',\r\n 'tigre/Source/_types.pxd',\r\n 'tigre/Source/_Atb.pyx'],\r\n sdist=sys.argv[1] == \"sdist\"),\r\n define_macros=[('IS_FOR_PYTIGRE', None)],\r\n library_dirs=[CUDA['lib64']],\r\n libraries=['cudart'],\r\n language='c++',\r\n runtime_library_dirs=[CUDA['lib64']] if not IS_WINDOWS else None,\r\n include_dirs=[NUMPY_INCLUDE, CUDA['include'], 'tigre/Source'])\r\n\r\n\r\ntvdenoising_ext = Extension('_tvdenoising',\r\n sources=include_headers(['tigre/Source/TIGRE_common.cpp',\r\n 'tigre/Source/tvdenoising.cu',\r\n 'tigre/Source/_types.pxd',\r\n 'tigre/Source/_tvdenoising.pyx'],\r\n sdist=sys.argv[1] == \"sdist\"),\r\n define_macros=[('IS_FOR_PYTIGRE', None)],\r\n library_dirs=[CUDA['lib64']],\r\n libraries=['cudart'],\r\n language='c++',\r\n runtime_library_dirs=[CUDA['lib64']] if not IS_WINDOWS else None,\r\n include_dirs=[NUMPY_INCLUDE, CUDA['include'], 'Source'])\r\n\r\n\r\nminTV_ext = Extension('_minTV',\r\n sources=include_headers(['tigre/Source/TIGRE_common.cpp',\r\n 'tigre/Source/POCS_TV.cu',\r\n 'tigre/Source/_types.pxd',\r\n 'tigre/Source/_minTV.pyx'],\r\n sdist=sys.argv[1] == \"sdist\"),\r\n define_macros=[('IS_FOR_PYTIGRE', None)],\r\n library_dirs=[CUDA['lib64']],\r\n libraries=['cudart'],\r\n language='c++',\r\n runtime_library_dirs=[CUDA['lib64']] if not IS_WINDOWS else None,\r\n include_dirs=[NUMPY_INCLUDE, CUDA['include'], 'Source'])\r\n\r\n\r\nAwminTV_ext = Extension('_AwminTV',\r\n sources=include_headers(['tigre/Source/TIGRE_common.cpp',\r\n 'tigre/Source/POCS_TV2.cu',\r\n # 'tigre/Source/_types.pxd',\r\n 'tigre/Source/_AwminTV.pyx'],\r\n sdist=sys.argv[1] == \"sdist\"),\r\n define_macros=[('IS_FOR_PYTIGRE', None)],\r\n library_dirs=[CUDA['lib64']],\r\n libraries=['cudart'],\r\n language='c++',\r\n 
runtime_library_dirs=[CUDA['lib64']] if not IS_WINDOWS else None,\r\n include_dirs=[NUMPY_INCLUDE, CUDA['include'], 'Source'])\r\n\r\n\r\nsetup(name='pytigre',\r\n version='0.1.8',\r\n author='Reuben Lindroos, Sam Loescher',\r\n packages=find_packages(),\r\n scripts=['tigre/demos/launch.sh',\r\n 'tests/runscript.sh'],\r\n include_package_data=True,\r\n ext_modules=[Ax_ext, Atb_ext, tvdenoising_ext, minTV_ext, AwminTV_ext],\r\n py_modules=['tigre.py'],\r\n cmdclass={'build_ext': BuildExtension},\r\n install_requires=['Cython',\r\n 'matplotlib',\r\n 'numpy',\r\n 'scipy'],\r\n license_file='LICENSE.txt',\r\n license='BSD 3-Clause',\r\n # since the package has c code, the egg cannot be zipped\r\n zip_safe=False)\r\n"
] | [
[
"numpy.get_numpy_include",
"numpy.get_include"
]
] |
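The `BuildExtension` subclass above accepts a per-language dictionary for `extra_compile_args` instead of the usual flat list. A minimal sketch of an `Extension` using that convention (the module name, source files, and flag values are illustrative assumptions, not TIGRE's actual build options):

    from setuptools import Extension

    ext = Extension(
        '_example_cuda_ext',                          # hypothetical module name
        sources=['example.cpp', 'example_kernel.cu'],
        language='c++',
        extra_compile_args={                          # flags split by compiler, as BuildExtension expects
            'cxx': ['-O3'],
            'nvcc': ['-O3', '--use_fast_math'],
        })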
laya-laya/minimum-dominating-set | [
"6f294922c9859a0b76b57cd9e290f5f5919fefdc"
] | [
"mds.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 16 17:04:47 2020\n\n@author: layaparkavousi\n\"\"\"\n\nimport pulp \nimport networkx as nx\nfrom numpy import genfromtxt\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\n## preparing the dataset and the adjacency matrix\nmydata = genfromtxt('chrom.csv', delimiter=',')\nmydata = mydata[1:,1:]\nm = len(mydata)\n\nfor i in range(0,m):\n for j in range(0,m):\n if mydata[i][j] ==1:\n mydata[i][j] = 0\n elif mydata[i][j]==2:\n mydata[i][j] = 1\n elif mydata[i][j]==3:\n mydata[i][j] = 0.5\n \n\n## make graph from the data with NetworkX and plot it\n \nG = nx.Graph(mydata)\nplt.figure(3,figsize=(12,12)) \nnx.draw(G,node_color=\"b\", node_size=7,width=0.05)\n\n## finding minimum dominating set\n\n# define the problem\nprob = pulp.LpProblem(\"minimum_dominating_set\", pulp.LpMinimize)\n\n# define the variables\nx = pulp.LpVariable.dicts(\"x\", G.nodes(), cat=pulp.LpBinary)\n\n# define the objective function\nstart_time = time.time()\n\n\nfor (v,u) in G.edges():\n \n prob += pulp.lpSum(x)\n \n# define the constraints\nfor v in G.nodes():\n prob += x[v] + pulp.lpSum([x[u] for u in G.neighbors(v)]) >= 1\n \n \n# solve\nprob.solve()\nend_time = time.time()\nprint(\"%s seconds\" % (end_time - start_time))\n\n# display solution\nfor v in G.nodes():\n if pulp.value(x[v]) > 0.99:\n print(\"node %s selected\"%v)\n \n \nfor v in prob.variables():\n if v.varValue == 1:\n print(v.name, \"=\", v.varValue)"
] | [
[
"numpy.genfromtxt",
"matplotlib.pyplot.figure"
]
] |
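The corrected formulation (objective added once, one covering constraint per node) is easiest to check on a toy graph; a self-contained sketch using networkx's built-in `cycle_graph` in place of the `chrom.csv` data:

    import pulp
    import networkx as nx

    G = nx.cycle_graph(6)
    prob = pulp.LpProblem("minimum_dominating_set", pulp.LpMinimize)
    x = pulp.LpVariable.dicts("x", G.nodes(), cat=pulp.LpBinary)
    prob += pulp.lpSum(x)  # objective is stated once, outside any loop
    for v in G.nodes():
        # v is dominated: either selected itself or adjacent to a selected node
        prob += x[v] + pulp.lpSum([x[u] for u in G.neighbors(v)]) >= 1
    prob.solve()
    print(sorted(v for v in G.nodes() if pulp.value(x[v]) > 0.99))  # a 6-cycle needs 2 nodes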
TihonkovSergey/wood_coefficient | [
"853a2e5babda662f71697045875a2e24e17f9df6"
] | [
"interface.py"
] | [
"import json\n\nimport seaborn as sns\nimport streamlit as st\nimport matplotlib.pyplot as plt\nimport cv2\n\nfrom src.data.data_load import get_image_by_path\nfrom src.utils.utils import get_front_images_paths\nfrom src.data.data_preprocessing import filter_image\nfrom definitions import DATA_DIR\n\nif __name__ == '__main__':\n with open(DATA_DIR.joinpath('valid_paths.txt')) as f:\n valid_paths = f.readlines()\n valid_paths = [p.strip() for p in valid_paths]\n\n path_folder = st.selectbox('Путь:', valid_paths)\n track_dir = DATA_DIR.joinpath(f\"part_1/{path_folder}\")\n img_paths = get_front_images_paths(track_dir.joinpath(\"FrontJPG/\"))\n images = [get_image_by_path(p) for p in img_paths]\n\n front_image_number = st.slider('Номер фотографии:', 0, len(img_paths) - 1)\n img = images[front_image_number]\n\n fig, ax = plt.subplots(figsize=(10, 2))\n ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n st.pyplot(fig)\n\n filtered_image = filter_image(img)\n fig, ax = plt.subplots(figsize=(10, 2))\n ax.imshow(cv2.cvtColor(filtered_image * 255, cv2.COLOR_BGR2RGB))\n st.pyplot(fig)\n\n fig, ax = plt.subplots(figsize=(10, 2))\n\n with open(track_dir.joinpath(\"info.json\")) as file: # info\n info = json.load(file)\n labels = info[\"labels\"]\n labels = [0 if el == -1 else el for el in labels]\n\n with open(track_dir.joinpath(\"lstm_info.json\")) as file: # info\n info = json.load(file)\n lstm_labels = info[\"labels\"]\n lstm_labels = [2 if el == 1 else el for el in lstm_labels]\n\n sns.lineplot(x=range(len(labels)), y=labels, color=\"g\", linewidth=2.5, label=\"opencv\", ax=ax)\n sns.lineplot(x=range(len(labels)), y=lstm_labels, color=\"r\", linewidth=2.5, label=\"lstm\", ax=ax)\n st.pyplot(fig)\n"
] | [
[
"matplotlib.pyplot.subplots"
]
] |
yilingjia/BanditLib | [
"aab74f65d576f964e233a685e98bc6c1fd940686"
] | [
"lib/factorUCB.py"
] | [
"import numpy as np\nfrom util_functions import vectorize, matrixize\nfrom lib.BaseAlg import BaseAlg\n\n\nclass FactorUCBArticleStruct:\n def __init__(\n self,\n id,\n context_dimension,\n latent_dimension,\n lambda_,\n W,\n init=\"random\",\n context_feature=None,\n ):\n self.W = W\n self.id = id\n self.context_dimension = context_dimension\n self.latent_dimension = latent_dimension\n self.d = context_dimension + latent_dimension\n\n self.A2 = lambda_ * np.identity(n=self.latent_dimension)\n self.b2 = np.zeros(self.latent_dimension)\n self.A2Inv = np.linalg.inv(self.A2)\n\n self.count = {}\n self.time = 0\n if init == \"random\":\n self.V = np.random.rand(self.d)\n else:\n self.V = np.zeros(self.d)\n\n def updateParameters(self, user, click, userID):\n self.time += 1\n if userID in self.count:\n self.count[userID] += 1\n else:\n self.count[userID] = 1\n\n self.A2 += np.outer(\n user.CoTheta.T[userID][self.context_dimension :],\n user.CoTheta.T[userID][self.context_dimension :],\n )\n self.b2 += user.CoTheta.T[userID][self.context_dimension :] * (\n click\n - user.CoTheta.T[userID][: self.context_dimension].dot(\n self.V[: self.context_dimension]\n )\n )\n self.A2Inv = np.linalg.inv(self.A2)\n\n self.V[self.context_dimension :] = np.dot(self.A2Inv, self.b2)\n\n def getCount(self, user_id):\n if user_id in self.count:\n return self.count[user_id]\n else:\n return 0\n\n\nclass FactorUCBUserStruct:\n def __init__(\n self, context_dimension, latent_dimension, lambda_, userNum, W, init=\"random\"\n ):\n self.W = W\n self.context_dimension = context_dimension\n self.latent_dimension = latent_dimension\n self.d = context_dimension + latent_dimension\n\n self.userNum = userNum\n self.A = lambda_ * np.identity(n=self.d * userNum)\n self.CCA = np.identity(n=self.d * userNum)\n self.b = np.zeros(self.d * userNum)\n self.AInv = np.linalg.inv(self.A)\n\n self.count = []\n for i in range(userNum):\n self.count.append({})\n\n self.time = 0\n if init == \"random\":\n self.UserTheta = np.random.rand(self.d, userNum)\n self.CoTheta = np.dot(self.UserTheta, self.W)\n else:\n self.UserTheta = np.zeros(shape=(self.d, userNum))\n self.CoTheta = np.zeros(shape=(self.d, userNum))\n\n self.BigW = np.kron(np.transpose(W), np.identity(n=self.d))\n # self.U = np.zeros(self.d)\n\n def updateParameters(self, articles, clicks, userID):\n self.time += len(articles)\n for article, click in zip(articles, clicks):\n if article.id in self.count[userID]:\n self.count[userID][article.id] += 1\n else:\n self.count[userID][article.id] = 1\n X = vectorize(np.outer(article.V, self.W.T[userID]))\n self.A += np.outer(X, X)\n self.b += click * X\n\n self.AInv = np.linalg.inv(self.A)\n\n self.UserTheta = matrixize(np.dot(self.AInv, self.b), len(articles[0].V))\n self.CoTheta = np.dot(self.UserTheta, self.W)\n self.CCA = np.dot(np.dot(self.BigW, self.AInv), np.transpose(self.BigW))\n\n def getA(self):\n return self.A\n\n def getProb(self, alpha, alpha2, article, userID):\n if alpha == -1:\n alpha = 0.1 * np.sqrt(np.log(self.time + 1)) + 0.1 * (1 - 0.8 ** self.time)\n alpha2 = 0.1 * np.sqrt(np.log(article.time + 1)) + 0.1 * (\n 1 - 0.8 ** article.time\n )\n\n TempFeatureM = np.zeros(shape=(len(article.V), self.userNum))\n TempFeatureM.T[userID] = article.V\n TempFeatureV = vectorize(TempFeatureM)\n\n mean = np.dot(self.CoTheta.T[userID], article.V)\n var = np.sqrt(np.dot(np.dot(TempFeatureV, self.CCA), TempFeatureV))\n var2 = np.sqrt(\n np.dot(\n np.dot(self.CoTheta.T[userID][self.context_dimension :], article.A2Inv),\n 
self.CoTheta.T[userID][self.context_dimension :],\n )\n )\n pta = mean + alpha * var + alpha2 * var2\n return pta\n\n def getProb_plot(self, alpha, alpha2, article, userID):\n TempFeatureM = np.zeros(shape=(len(article.V), self.userNum))\n TempFeatureM.T[userID] = article.V\n TempFeatureV = vectorize(TempFeatureM)\n\n mean = np.dot(self.CoTheta.T[userID], article.V)\n var = np.sqrt(np.dot(np.dot(TempFeatureV, self.CCA), TempFeatureV))\n var2 = np.sqrt(\n np.dot(\n np.dot(self.CoTheta.T[userID][self.context_dimension :], article.A2Inv),\n self.CoTheta.T[userID][self.context_dimension :],\n )\n )\n pta = mean + alpha * var + alpha2 * var2\n return pta, mean, alpha * var\n\n def getCount(self, article_id, userID):\n if article_id in self.count[userID]:\n return self.count[userID][article_id]\n else:\n return 0\n\n\nclass FactorUCBAlgorithm(BaseAlg):\n def __init__(\n self, arg_dict, init=\"random\", window_size=1, max_window_size=10\n ): # n is number of users\n BaseAlg.__init__(self, arg_dict)\n self.d = self.dimension + self.latent_dimension\n\n self.USERS = FactorUCBUserStruct(\n self.dimension, self.latent_dimension, self.lambda_, self.n, self.W, init\n )\n self.articles = []\n for i in range(self.itemNum):\n self.articles.append(\n FactorUCBArticleStruct(\n i, self.dimension, self.latent_dimension, self.lambda_, self.W, init\n )\n )\n\n if window_size == -1:\n self.increase_window = True\n self.window_size = 1\n else:\n self.increase_window = False\n self.window_size = window_size\n self.max_window_size = max_window_size\n self.window = []\n self.time = 0\n\n def decide(self, pool_articles, userID, k=1):\n articles = []\n for i in range(k):\n maxPTA = float(\"-inf\")\n articlePicked = None\n\n for x in pool_articles:\n self.articles[x.id].V[: self.dimension] = x.contextFeatureVector[\n : self.dimension\n ]\n x_pta = self.USERS.getProb(\n self.alpha, self.alpha2, self.articles[x.id], userID\n )\n\n # pick article with highest Prob\n # print x_pta\n if maxPTA < x_pta and x not in articles:\n articlePicked = x\n maxPTA = x_pta\n articles.append(articlePicked)\n return articles\n\n def getProb(self, pool_articles, userID):\n means = []\n vars = []\n for x in pool_articles:\n self.articles[x.id].V[: self.dimension] = x.contextFeatureVector[\n : self.dimension\n ]\n x_pta, mean, var = self.USERS.getProb_plot(\n self.alpha, self.alpha2, self.articles[x.id], userID\n )\n means.append(mean)\n vars.append(var)\n return means, vars\n\n def updateParameters(self, articlePicked, click, userID):\n self.time += 1\n self.window.append((articlePicked, click, userID))\n if len(self.window) % self.window_size == 0:\n articles = []\n clicks = []\n for articlePicked, click, userID in self.window:\n articles.append(self.articles[articlePicked.id])\n clicks.append(click)\n self.USERS.updateParameters(articles, clicks, userID)\n for articlePicked, click, userID in self.window:\n self.articles[articlePicked.id].updateParameters(\n self.USERS, click, userID\n )\n self.window = []\n if self.increase_window == True:\n self.window_size = min(self.window_size + 1, self.max_window_size)\n\n def increaseWindowSize(self):\n self.window_size = min(self.window_size + 1, self.max_window_size)\n\n def getCoTheta(self, userID):\n return self.USERS.CoTheta.T[userID]\n\n def getTheta(self, userID):\n return self.USERS.UserTheta.T[userID]\n\n def getV(self, articleID):\n return self.articles[articleID].V\n"
] | [
[
"numpy.dot",
"numpy.random.rand",
"numpy.zeros",
"numpy.log",
"numpy.identity",
"numpy.transpose",
"numpy.outer",
"numpy.linalg.inv"
]
] |
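The `getProb` methods above all follow the same mean-plus-exploration-width pattern. A generic LinUCB-style sketch of that score with plain numpy (simplified to a single user and synthetic data, not FactorUCB's coupled co-parameters):

    import numpy as np

    rng = np.random.default_rng(0)
    d, alpha = 4, 0.3
    X = rng.normal(size=(50, d))               # past context rows
    y = X @ np.ones(d) + rng.normal(size=50)   # observed rewards
    A = np.identity(d) + X.T @ X               # ridge design matrix
    theta = np.linalg.solve(A, X.T @ y)        # point estimate of the parameter
    x_new = rng.normal(size=d)                 # candidate article features
    ucb = theta @ x_new + alpha * np.sqrt(x_new @ np.linalg.solve(A, x_new))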
TachibanaET/CODSUG2 | [
"e3c064fd59067d88d2899a7da36fe5c83bac9537"
] | [
"source/model_fine_tune.py"
] | [
"import json\nimport os\nimport numpy as np\nimport argparse\nfrom tqdm import tqdm\nfrom utility.encode_bpe import BPEEncoder_ja\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nfrom torch.utils.data import Dataset, DataLoader\nimport requests\n\nclass GPT2FineTune():\n def __init__(self, h_params, bpe_encoder):\n # print(bpe_encoder)\n self.h_params = h_params\n self.tokenizer = bpe_encoder\n self.pre_trained_model = GPT2LMHeadModel.from_pretrained(self.h_params['model_path'])\n\n def fine_tune(self):\n torch.backends.cudnn.benchmark = True\n self.pre_trained_model = self.model.to(self.h_params['device'])\n\n if self.h_params['device'] == 'cuda':\n self.pre_trained_model = torch.nn.DataParallel(self.pre_trained_model)\n\n self.model.train()\n \n\nif __name__ == '__main__':\n h_params = {\n 'temperature' : 1,\n 'top_k' : 40,\n 'top_p' : 0.9,\n 'batch_size' : 64,\n 'epochs' : 50,\n 'learning_rate' : 1e-4,\n 'warmup_steps' : 5000,\n 'max_seq_len' : 256,\n 'device' : 'cuda' if torch.cuda.is_available() else 'cpu',\n 'model_path' : '/workspace/source/models/gpt2-pytorch-model-medium/',\n }\n\n with open('ja-bpe.txt') as f:\n bpe = f.read().split('\\n')\n\n with open('emoji.json') as f:\n emoji = json.loads(f.read())\n\n bpe_encoder = BPEEncoder_ja(bpe, emoji)\n gpt2_fine_tune = GPT2FineTune(\n h_params = h_params,\n bpe_encoder = bpe_encoder\n )\n"
] | [
[
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] |
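The device-placement sequence fixed above (move to device, wrap in DataParallel, switch to train mode) is the standard transformers/PyTorch recipe; a minimal sketch with the public 'gpt2' checkpoint standing in for the local Japanese model path:

    import torch
    from transformers import GPT2LMHeadModel

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = GPT2LMHeadModel.from_pretrained('gpt2')  # stand-in checkpoint
    model = model.to(device)                         # move before wrapping
    if device == 'cuda':
        model = torch.nn.DataParallel(model)         # replicate across visible GPUs
    model.train()                                    # enable dropout etc. for fine-tuning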
zdbzdb1212/hyppo | [
"5092beedec0a0c13ffa69f7a77f4ee30f3294256"
] | [
"hyppo/ksample/manova.py"
] | [
"import numpy as np\nfrom numba import jit\nfrom scipy.stats import f\n\nfrom ._utils import _CheckInputs\nfrom .base import KSampleTest, KSampleTestOutput\n\n\nclass MANOVA(KSampleTest):\n r\"\"\"\n Multivariate analysis of variance (MANOVA) test statistic and p-value.\n\n MANOVA is the current standard for multivariate `k`-sample testing.\n\n Notes\n -----\n The test statistic is formulated as below\n :footcite:p:`pandaNonparMANOVAIndependence2021`:\n\n In MANOVA, we are testing if the mean vectors of each of the `k`-samples are the\n same. Define\n :math:`\\{ {x_1}_i \\stackrel{iid}{\\sim} F_{X_1},\\ i = 1, ..., n_1 \\}`,\n :math:`\\{ {x_2}_j \\stackrel{iid}{\\sim} F_{X_2},\\ j = 1, ..., n_2 \\}`, ... as `k`\n groups\n of samples deriving from different a multivariate Gaussian distribution with the\n same dimensionality and same covariance matrix.\n That is, the null and alternate hypotheses are,\n\n .. math::\n\n H_0 &: \\mu_1 = \\mu_2 = \\cdots = \\mu_k, \\\\\n H_A &: \\exists \\ j \\neq j' \\text{ s.t. } \\mu_j \\neq \\mu_{j'}\n\n Let :math:`\\bar{x}_{i \\cdot}` refer to the columnwise means of :math:`x_i`; that is,\n :math:`\\bar{x}_{i \\cdot} = (1/n_i) \\sum_{j=1}^{n_i} x_{ij}`. The pooled sample\n covariance of each group, :math:`W`, is\n\n .. math::\n\n W = \\sum_{i=1}^k \\sum_{j=1}^{n_i} (x_{ij} - \\bar{x}_{i\\cdot}\n (x_{ij} - \\bar{x}_{i\\cdot})^T\n\n Next, define :math:`B` as the sample covariance matrix of the means. If\n :math:`n = \\sum_{i=1}^k n_i` and the grand mean is\n :math:`\\bar{x}_{\\cdot \\cdot} = (1/n) \\sum_{i=1}^k \\sum_{j=1}^{n} x_{ij}`,\n\n .. math::\n\n B = \\sum_{i=1}^k n_i (\\bar{x}_{i \\cdot} - \\bar{x}_{\\cdot \\cdot})\n (\\bar{x}_{i \\cdot} - \\bar{x}_{\\cdot \\cdot})^T\n\n Some of the most common statistics used when performing MANOVA include the Wilks'\n Lambda, the Lawley-Hotelling trace, Roy's greatest root, and\n Pillai-Bartlett trace (PBT)\n :footcite:p:`bartlettNoteTestsSignificance1939`\n :footcite:p:`raoTestsSignificanceMultivariate1948`\n (PBT was chosen to be the best of these\n as it is the most conservative\n :footcite:p:`warnePrimerMultivariateAnalysis2019`) and\n :footcite:p:`everittMonteCarloInvestigation1979`\n has shown that there are\n minimal differences in statistical power among these statistics.\n Let :math:`\\lambda_1, \\lambda_2, \\ldots, \\lambda_s` refer to the eigenvalues of\n :math:`W^{-1} B`. Here :math:`s = \\min(\\nu_{B}, p)` is the minimum between the\n degrees of freedom of :math:`B`, :math:`\\nu_{B}` and :math:`p`. So, the PBT\n MANOVA test statistic can be written as,\n\n .. math::\n\n \\mathrm{MANOVA}_{n_1, \\ldots, n_k} (x, y) = \\sum_{i=1}^s\n \\frac{\\lambda_i}{1 + \\lambda_i} = \\mathrm{tr} (B (B + W)^{-1})\n\n The p-value analytically by using the F statitic. In the case of PBT, given\n :math:`m = (|p - \\nu_{B}| - 1) / 2` and :math:`r = (\\nu_{W} - p - 1) / 2`, this is:\n\n .. math::\n\n F_{s(2m + s + 1), s(2r + s + 1)} = \\frac{(2r + s + 1)\n \\mathrm{MANOVA}_{n_1, n_2} (x, y)}{(2m + s + 1) (s -\n \\mathrm{MANOVA}_{n_1, n_2} (x, y))}\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n\n def __init__(self):\n KSampleTest.__init__(self)\n\n def statistic(self, *args):\n r\"\"\"\n Calulates the MANOVA test statistic.\n\n Parameters\n ----------\n *args : ndarray\n Variable length input data matrices. All inputs must have the same\n number of dimensions. That is, the shapes must be `(n, p)` and\n `(m, p)`, ... where `n`, `m`, ... 
are the number of samples and `p` is\n the number of dimensions.\n\n Returns\n -------\n stat : float\n The computed MANOVA statistic.\n \"\"\"\n cmean = tuple(i.mean(axis=0) for i in args)\n gmean = np.vstack(args).mean(axis=0)\n W = _compute_w(args, cmean)\n B = _compute_b(args, cmean, gmean)\n\n stat = np.trace(B @ np.linalg.pinv(B + W))\n self.stat = stat\n\n return stat\n\n def test(self, *args):\n r\"\"\"\n Calculates the MANOVA test statistic and p-value.\n\n Parameters\n ----------\n *args : ndarray\n Variable length input data matrices. All inputs must have the same\n number of dimensions. That is, the shapes must be `(n, p)` and\n `(m, p)`, ... where `n`, `m`, ... are the number of samples and `p` is\n the number of dimensions.\n\n Returns\n -------\n stat : float\n The computed MANOVA statistic.\n pvalue : float\n The computed MANOVA p-value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from hyppo.ksample import MANOVA\n >>> x = np.arange(7)\n >>> y = x\n >>> stat, pvalue = MANOVA().test(x, y)\n >>> '%.3f, %.1f' % (stat, pvalue)\n '0.000, 1.0'\n \"\"\"\n inputs = list(args)\n check_input = _CheckInputs(\n inputs=inputs,\n )\n inputs = check_input()\n\n N = np.sum([i.shape[0] for i in inputs])\n p = inputs[0].shape[1]\n nu_w = N - len(inputs)\n\n if nu_w < p:\n raise ValueError(\"Test cannot be run, degree of freedoms is off\")\n\n stat = self.statistic(*inputs)\n nu_b = len(inputs) - 1\n s = np.min([p, nu_b])\n m = (np.abs(p - nu_b) - 1) / 2\n n = (nu_w - p - 1) / 2\n num = 2 * n + s + 1\n denom = 2 * m + s + 1\n pvalue = f.sf(num / denom * stat / (s - stat), s * denom, s * num)\n self.stat = stat\n self.pvalue = pvalue\n self.null_dist = None\n\n return KSampleTestOutput(stat, pvalue)\n\n\n@jit(nopython=True, cache=True)\ndef _compute_w(inputs, cmean): # pragma: no cover\n \"\"\"Calculate the W matrix\"\"\"\n\n p = list(inputs)[0].shape[1]\n W = np.zeros((p, p))\n\n for i in range(len(inputs)):\n for j in range(inputs[i].shape[0]):\n W += (inputs[i][j, :] - cmean[i]) @ (inputs[i][j, :] - cmean[i]).T\n\n return W\n\n\n@jit(nopython=True, cache=True)\ndef _compute_b(inputs, cmean, gmean): # pragma: no cover\n \"\"\"Calculate the B matrix\"\"\"\n\n p = list(inputs)[0].shape[1]\n B = np.zeros((p, p))\n\n for i in range(len(inputs)):\n n = inputs[i].shape[0]\n B += n * (cmean[i] - gmean) @ (cmean[i] - gmean).T\n\n return B\n"
] | [
[
"numpy.zeros",
"numpy.sum",
"numpy.linalg.pinv",
"scipy.stats.f.sf",
"numpy.min",
"numpy.abs",
"numpy.vstack"
]
] |
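The PBT statistic tr(B(B + W)^{-1}) from the docstring above can be reproduced by hand directly from the W and B definitions; a numpy-only sketch on synthetic Gaussian samples (the sample sizes and shift are arbitrary choices for illustration):

    import numpy as np

    rng = np.random.default_rng(0)
    samples = [rng.normal(size=(30, 3)),
               rng.normal(size=(25, 3)),
               rng.normal(loc=0.5, size=(40, 3))]
    cmeans = [s.mean(axis=0) for s in samples]
    gmean = np.vstack(samples).mean(axis=0)
    # within-group scatter W and between-group scatter B
    W = sum((s - m).T @ (s - m) for s, m in zip(samples, cmeans))
    B = sum(len(s) * np.outer(m - gmean, m - gmean) for s, m in zip(samples, cmeans))
    stat = np.trace(B @ np.linalg.pinv(B + W))
    print(stat)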
citrusjunos/mwe_aware_dependency_parsing | [
"4b3aca38463293e0f819582e117a8878465b20e5"
] | [
"hmtl/models/relation_extraction.py"
] | [
"# coding: utf-8\n\nimport logging\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable # from torch.nn.parameter import Parameter, Variable\n\nfrom overrides import overrides\n\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder\nfrom allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\nfrom allennlp.nn import util\n\nfrom hmtl.training.metrics import RelationF1Measure\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\n# Mapping specific to the dataset used in our setting (ACE2005)\n# Please adapt it if necessary\nrel_type_2_idx = {\"ORG-AFF\": 0, \"PHYS\": 1, \"ART\": 2, \"PER-SOC\": 3, \"PART-WHOLE\": 4, \"GEN-AFF\": 5}\nidx_2_rel_type = {value: key for key, value in rel_type_2_idx.items()}\n\n\[email protected](\"relation_extractor\")\nclass RelationExtractor(Model):\n \"\"\"\n\tA class containing the scoring model for relation extraction.\n\tIt is derived the model proposed by Bekoulis G. in \n\t\"Joint entity recognition and relation extraction as a multi-head selection problem\"\n\thttps://bekou.github.io/papers/eswa2018b/bekoulis_eswa_2018b.pdf\n\t\n\tParameters\n\t----------\n\tvocab: ``allennlp.data.Vocabulary``, required.\n The vocabulary fitted on the data.\n\ttext_field_embedder : ``TextFieldEmbedder``, required\n Used to embed the ``text`` ``TextField`` we get as input to the model.\n context_layer : ``Seq2SeqEncoder``, required\n This layer incorporates contextual information for each word in the document.\n\td: ``int``, required\n\t\tThe (half) dimension of embedding given\tby the encoder context_layer.\n\tl: ``int``, required\n\t\tThe dimension of the relation extractor scorer embedding.\n\tn_classes: ``int``, required\n\t\tThe number of different possible relation classes.\n\tactivation: ``str``, optional (default = \"relu\")\n\t\tNon-linear activation function for the scorer. 
Can be either \"relu\" or \"tanh\".\n\tlabel_namespace: ``str``, optional (default = \"relation_ace_labels\")\n\t\tThe namespace for the labels of the task of relation extraction.\n\t\"\"\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n context_layer: Seq2SeqEncoder,\n d: int,\n l: int,\n n_classes: int,\n activation: str = \"relu\",\n label_namespace: str = \"relation_ace_labels\",\n ) -> None:\n super(RelationExtractor, self).__init__(vocab)\n\n self._U = nn.Parameter(torch.Tensor(2 * d, l))\n self._W = nn.Parameter(torch.Tensor(2 * d, l))\n self._V = nn.Parameter(torch.Tensor(l, n_classes))\n self._b = nn.Parameter(torch.Tensor(l))\n\n self.init_weights()\n\n self._n_classes = n_classes\n self._activation = activation\n\n self._text_field_embedder = text_field_embedder\n self._context_layer = context_layer\n\n self._label_namespace = label_namespace\n\n self._relation_metric = RelationF1Measure()\n\n self._loss_fn = nn.BCEWithLogitsLoss()\n\n def init_weights(self) -> None:\n \"\"\"\n\t\tInitialization for the weights of the model.\n\t\t\"\"\"\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)\n\n def multi_class_cross_entropy_loss(self, scores, labels, mask):\n \"\"\"\n\t\tCompute the loss from\n\t\t\"\"\"\n # Compute the mask before computing the loss\n # Transform the mask that is at the sentence level (#Size: n_batches x padded_document_length)\n # to a suitable format for the relation labels level\n padded_document_length = mask.size(1)\n mask = mask.float() # Size: n_batches x padded_document_length\n squared_mask = torch.stack([e.view(padded_document_length, 1) * e for e in mask], dim=0)\n squared_mask = squared_mask.unsqueeze(-1).repeat(\n 1, 1, 1, self._n_classes\n ) # Size: n_batches x padded_document_length x padded_document_length x n_classes\n\n # The scores (and gold labels) are flattened before using\n # the binary cross entropy loss.\n # We thus transform\n flat_size = scores.size()\n scores = scores * squared_mask # Size: n_batches x padded_document_length x padded_document_length x n_classes\n scores_flat = scores.view(\n flat_size[0], flat_size[1], flat_size[2] * self._n_classes\n ) # Size: n_batches x padded_document_length x (padded_document_length x n_classes)\n labels = labels * squared_mask # Size: n_batches x padded_document_length x padded_document_length x n_classes\n labels_flat = labels.view(\n flat_size[0], flat_size[1], flat_size[2] * self._n_classes\n ) # Size: n_batches x padded_document_length x (padded_document_length x n_classes)\n\n loss = self._loss_fn(scores_flat, labels_flat)\n\n # Amplify the loss to actually see something...\n return 100 * loss\n\n @overrides\n def forward(self, text: Dict[str, torch.LongTensor], relations: torch.IntTensor = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n\t\tForward pass of the model.\n\t\tCompute the predictions and the loss (if labels are available).\n\t\t\n\t\tParameters:\n\t\t----------\n\t\ttext: Dict[str, torch.LongTensor]\n\t\t\tThe input sentences which have transformed into indexes (integers) according to a mapping token:str -> token:int\n\t\trelations: torch.IntTensor\n\t\t\tThe gold relations to predict.\n\t\t\"\"\"\n\n # Text field embedder map the token:int to their word embedding representation token:embedding (whatever these embeddings are).\n text_embeddings = self._text_field_embedder(text)\n # Compute the mask from the text: 1 if there 
is actually a word in the corresponding sentence, 0 if it has been padded.\n mask = util.get_text_field_mask(text) # Size: batch_size x padded_document_length\n\n # Compute the contextualized representation from the word embeddings.\n # Usually, _context_layer is a Seq2seq model such as LSTM\n encoded_text = self._context_layer(\n text_embeddings, mask\n ) # Size: batch_size x padded_document_length x lstm_output_size\n\n ###### Relation Scorer ##############\n # Compute the relation scores\n left = torch.matmul(encoded_text, self._U) # Size: batch_size x padded_document_length x l\n right = torch.matmul(encoded_text, self._W) # Size: batch_size x padded_document_length x l\n\n left = left.permute(1, 0, 2)\n left = left.unsqueeze(3)\n right = right.permute(0, 2, 1)\n right = right.unsqueeze(0)\n\n B = left + right\n B = B.permute(1, 0, 3, 2) # Size: batch_size x padded_document_length x padded_document_length x l\n\n outer_sum_bias = B + self._b # Size: batch_size x padded_document_length x padded_document_length x l\n if self._activation == \"relu\":\n activated_outer_sum_bias = F.relu(outer_sum_bias)\n elif self._activation == \"tanh\":\n activated_outer_sum_bias = F.tanh(outer_sum_bias)\n\n relation_scores = torch.matmul(\n activated_outer_sum_bias, self._V\n ) # Size: batch_size x padded_document_length x padded_document_length x n_classes\n #################################################################\n\n batch_size, padded_document_length = mask.size()\n\n relation_sigmoid_scores = torch.sigmoid(\n relation_scores\n ) # F.sigmoid(relation_scores) #Size: batch_size x padded_document_length x padded_document_length x n_classes\n\n # predicted_relations[l, i, j, k] == 1 iif we predict a relation k with ARG1==i, ARG2==j in the l-th sentence of the batch\n predicted_relations = torch.round(\n relation_sigmoid_scores\n ) # Size: batch_size x padded_document_length x padded_document_length x n_classes\n\n output_dict = {\n \"relation_sigmoid_scores\": relation_sigmoid_scores,\n \"predicted_relations\": predicted_relations,\n \"mask\": mask,\n }\n\n if relations is not None:\n # Reformat the gold relations before computing the loss\n # Size: batch_size x padded_document_length x padded_document_length x n_classes\n # gold_relations[l, i, j, k] == 1 iif we predict a relation k with ARG1==i, ARG2==j in the l-th sentence of the batch\n gold_relations = torch.zeros(batch_size, padded_document_length, padded_document_length, self._n_classes)\n\n for exple_idx, exple_tags in enumerate(relations): # going through the batch\n # rel is a list of list containing the current sentence in the batch\n # each sublist in rel is of size padded_document_length\n # and encodes a relation in the sentence where the two non zeros elements\n # indicate the two words arguments AND the relation type between these two words.\n for rel in exple_tags:\n # relations have been padded, so for each sentence in the batch there are\n # max_nb_of_relations_in_batch_for_one_sentence relations ie (number of sublist such as rel)\n # The padded relations are simply list of size padded_document_length filled with 0.\n if rel.sum().item() == 0:\n continue\n\n for idx in rel.nonzero():\n label_srt = self.vocab.get_token_from_index(rel[idx].item(), self._label_namespace)\n arg, rel_type = label_srt.split(\"_\")\n if arg == \"ARG1\":\n x = idx.data[0]\n else:\n y = idx.data[0]\n\n gold_relations[exple_idx, x, y, rel_type_2_idx[rel_type]] = 1\n\n # GPU support\n if text_embeddings.is_cuda:\n gold_relations = 
gold_relations.cuda()\n\n # Compute the loss\n output_dict[\"loss\"] = self.multi_class_cross_entropy_loss(\n scores=relation_scores, labels=gold_relations, mask=mask\n )\n\n # Compute the metrics with the predictions.\n self._relation_metric(predictions=predicted_relations, gold_labels=gold_relations, mask=mask)\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, Any]:\n \"\"\"\n\t\tDecode the predictions\n\t\t\"\"\"\n decoded_predictions = []\n\n for instance_tags in output_dict[\"predicted_relations\"]:\n sentence_length = instance_tags.size(0)\n decoded_relations = []\n\n for arg1, arg2, rel_type_idx in instance_tags.nonzero().data:\n relation = [\"*\"] * sentence_length\n rel_type = idx_2_rel_type[rel_type_idx.item()]\n relation[arg1] = \"ARG1_\" + rel_type\n relation[arg2] = \"ARG2_\" + rel_type\n decoded_relations.append(relation)\n\n decoded_predictions.append(decoded_relations)\n\n output_dict[\"decoded_predictions\"] = decoded_predictions\n\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n \"\"\"\n\t\tCompute the metrics for relation: precision, recall and f1.\n\t\tA relation is considered correct if we can correctly predict the last word of ARG1, the last word of ARG2 and the relation type.\n\t\t\"\"\"\n metric_dict = self._relation_metric.get_metric(reset=reset)\n return {x: y for x, y in metric_dict.items() if \"overall\" in x}\n"
] | [
[
"torch.zeros",
"torch.sigmoid",
"torch.round",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.normal_",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.relu",
"torch.Tensor",
"torch.matmul",
"torch.nn.functional.tanh"
]
] |
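The permute/unsqueeze sequence in the relation scorer above builds an outer sum B[b, i, j, :] = left[b, i, :] + right[b, j, :]; the same tensor can be produced with plain broadcasting. A shape-checking sketch with made-up dimensions (not the model's trained weights):

    import torch
    import torch.nn.functional as F

    b, n, two_d, l, n_classes = 2, 5, 8, 6, 3
    H = torch.randn(b, n, two_d)                   # encoded text
    U = torch.randn(two_d, l)
    W = torch.randn(two_d, l)
    V = torch.randn(l, n_classes)
    bias = torch.randn(l)
    left = (H @ U).unsqueeze(2)                    # b x n x 1 x l
    right = (H @ W).unsqueeze(1)                   # b x 1 x n x l
    scores = F.relu(left + right + bias) @ V       # b x n x n x n_classes
    assert scores.shape == (b, n, n, n_classes)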
cthoyt-forks-and-packages/AutomatedSeriesClassification | [
"8057137619bc7b0b9692ffca2e750624e020a5db"
] | [
"AutomatedSeriesClassification/mainSeriesClassification.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 27 16:40:49 2020\n\n@author: krugefr1\n\"\"\"\n\nimport numpy as np\nimport os\ntry:\n import arthor\nexcept ImportError:\n arthor = None\nfrom rdkit import Chem\nfrom rdkit.Chem import rdSubstructLibrary\nimport pickle\nimport random\nimport pandas as pd\nimport copy\n\nfrom AutomatedSeriesClassification import UPGMAclustering, Butinaclustering, utilsDataPrep\n\nclass Classification:\n \n def __init__(self, proj, datapath, dbpath, chembldb, flimit=1e-3, MinClusterSize=20, clustering='UPGMA', calcDists=True, calcScores=False):\n self.proj=proj\n self.datapath=datapath\n self.dbpath=dbpath\n self.chembldb=chembldb\n self.flimit=flimit\n self.MinClusterSize=MinClusterSize\n self.clustering=clustering\n self.calcScores=calcScores\n self.calcDists=calcDists\n # load data\n self.moldata_proj, self.distdata_proj=utilsDataPrep.PrepareData(self.proj,self.datapath,distMeasure='Tanimoto',FP='Morgan2', calcDists=self.calcDists)\n if arthor is not None:\n if not os.path.isdir(dbpath):\n os.mkdir(dbpath) \n # set up project database for arthor substructure matching \n df=self.moldata_proj[['Structure','ID']]\n df.to_csv('./arthor/{0}.smi'.format(self.proj), header=None, index=None, sep=' ')\n os.system('smi2atdb -j 0 -t {0}{1}.smi {0}{1}.atdb'.format(self.dbpath,self.proj))\n os.system('atdb2fp -j 0 {0}{1}.atdb'.format(self.dbpath,self.proj))\n self.proj_db=arthor.SubDb('{0}{1}.atdb'.format(self.dbpath,self.proj))\n else:\n if type(dbpath)==rdSubstructLibrary.SubstructLibrary:\n self.proj_db = dbpath\n self.db_size = len(self.proj_db)\n else:\n if not os.path.exists(dbpath):\n print(\"creating database\")\n mols = rdSubstructLibrary.CachedTrustedSmilesMolHolder()\n fps = rdSubstructLibrary.PatternHolder()\n for smi in self.moldata_proj['Structure']:\n m = Chem.MolFromSmiles(smi)\n mols.AddSmiles(Chem.MolToSmiles(m))\n fps.AddFingerprint(Chem.PatternFingerprint(m))\n self.proj_db = rdSubstructLibrary.SubstructLibrary(mols,fps)\n self.db_size = len(mols)\n pickle.dump(self.proj_db,open(dbpath,'wb+'))\n else:\n self.proj_db = pickle.load(open(dbpath,'rb'))\n self.db_size = len(self.proj_db)\n\n\n def AssignSeriesToMCS(self,MCSdict):\n # assign series to MCS of selected clusters\n smartslist=[v[2] for v in MCSdict.values()]\n MolAssign_prel={}\n MolAssignment={}\n for s in range(len(smartslist)):\n if arthor is not None:\n res=self.proj_db.Search(smartslist[s])\n mols=[int(i.decode(\"utf-8\")) for i in res]\n else:\n mols = self.proj_db.GetMatches(Chem.MolFromSmarts(smartslist[s]),maxResults=self.db_size)\n MolAssign_prel[list(MCSdict.keys())[s]]=list(mols)\n \n # remove all series that are entirely in another series \n for key1 in MolAssign_prel.keys():\n add=1\n for key2 in MolAssign_prel.keys():\n if key2!=key1:\n if set(MolAssign_prel[key1]).issubset(set(MolAssign_prel[key2])):\n if set(MolAssign_prel[key2]).issubset(set(MolAssign_prel[key1])) and (MCSdict[key1][0]>=MCSdict[key2][0]):\n add=1 \n else:\n add=0\n break \n if add==1 and MolAssign_prel[key1] not in MolAssignment.values():\n MolAssignment[key1]=MolAssign_prel[key1]\n \n \n MolAssignment={k:MolAssignment[k] for k in MolAssignment.keys() if len(MolAssignment[k])>self.MinClusterSize}\n if self.calcScores:\n MCSdict={k:(MCSdict[k][0],len(MolAssignment[k]),MCSdict[k][2], MCSdict[k][3], MolAssignment[k]) for k in MolAssignment.keys()}\n else:\n MCSdict={k:(MCSdict[k][0],len(MolAssignment[k]),MCSdict[k][2], MolAssignment[k]) for k in MolAssignment.keys()}\n return 
MolAssignment, MCSdict\n\n \n def ApplyClustering(self):\n \n # apply custering and calculate MCS\n if self.clustering == 'UPGMA':\n MCSdict=UPGMAclustering.ApplyUPGMA(self.distdata_proj,self.moldata_proj,self.chembldb, self.flimit, self.MinClusterSize, self.calcScores)\n elif self.clustering == 'Butina':\n distdata=copy.deepcopy(self.distdata_proj)\n MCSdict=Butinaclustering.ApplyButina(distdata, self.moldata_proj, self.chembldb, self.flimit, self.MinClusterSize,self.calcScores)\n else:\n print('Clustering algorithm not implemented.')\n return\n \n # assign series through substructure matching and filtering\n self.MolAssignment, self.MCSdict = self.AssignSeriesToMCS(MCSdict)\n \n # prepare and save output\n self.moldata_proj['ClusterID']=[list() for x in range(len(self.moldata_proj.index))]\n \n for k in self.MolAssignment.keys():\n self.moldata_proj['ClusterID'].loc[self.moldata_proj['ID'].map(lambda x: x in self.MolAssignment[k])].apply(lambda x: x.append(k))\n \n if self.clustering=='UPGMA':\n self.moldata_proj.to_csv('{0}moldata_UPGMA.csv'.format(self.datapath))\n with open('{0}ClusterData_UPGMA.txt'.format(self.datapath), 'wb') as fileout:\n pickle.dump(self.MCSdict,fileout)\n elif self.clustering == 'Butina':\n self.moldata_proj.to_csv('{0}moldata_Butina.csv'.format(self.datapath))\n with open('{0}ClusterData_Butina.txt'.format(self.datapath), 'wb') as fileout:\n pickle.dump(self.MCSdict,fileout) \n else:\n print('Clustering algorithm not implemented.')\n return\n \n \n \n def CalculatePerformance(self,seriescolumn='series assignment'):\n \n # benchmark the automated classification against a different (probably human-defined) classification\n # human-defined compound assignment is specified in the column \"seriescolumn\" of the dataframe \"moldata\"\n # automated classification assignment specified in dict \"MolAssignment\"\n \n # calculates F1 score of automatically-identified series w.r.t. 
all human-defined series, then links\n # each automatically-identified series to the human-defined series with highest F1 score\n \n scaflist=list(set(self.moldata_proj['scaffold'].tolist()))\n scaflist.sort()\n \n intersect_matrix=np.zeros((len(scaflist),len(self.MolAssignment)))\n NMatchScaf=[]\n NMatchCluster=np.array([len(v) for v in self.MolAssignment.values()])\n for scaf_ind in range(len(scaflist)):\n mollist=self.moldata_proj['ID'].loc[self.moldata_proj[seriescolumn].map(lambda x: scaflist[scaf_ind] in x)].tolist()\n intersect_scaf=np.array([len(list(set(mollist)&set(clusterlist))) for clusterlist in self.MolAssignment.values()])\n intersect_matrix[scaf_ind,:]=intersect_scaf\n NMatchScaf.append(len(mollist))\n \n NMatchScaf=np.array(NMatchScaf)\n RecallMatrix=intersect_matrix/NMatchScaf[:,None]\n PrecMatrix=intersect_matrix/NMatchCluster[None,:]\n Fscore=(2*RecallMatrix*PrecMatrix)/(RecallMatrix+PrecMatrix+1e-9)\n maxscore=np.argmax(Fscore,axis=0)\n \n PrecVector=np.zeros(len(self.MolAssignment))\n RecallVector=np.zeros(len(self.MolAssignment))\n FscoreVector=np.zeros(len(self.MolAssignment))\n LinkVector=[]\n \n for col in range(len(self.MolAssignment)):\n PrecVector[col]=PrecMatrix[maxscore[col],col]\n RecallVector[col]=RecallMatrix[maxscore[col],col]\n FscoreVector[col]=Fscore[maxscore[col],col]\n LinkVector.append((list(self.MolAssignment.keys())[col],scaflist[maxscore[col]]))\n\n LinkVector=np.array(LinkVector) \n self.PerformanceClusters={'recall':RecallVector,'precision':PrecVector,'Fscore':FscoreVector,'linked series':LinkVector}\n \n if self.clustering=='UPGMA':\n with open('{0}PerformanceData_UPGMA.txt'.format(self.datapath), 'wb') as fileout:\n pickle.dump(self.PerformanceClusters,fileout)\n elif self.clustering == 'Butina':\n with open('{0}PerformanceData_Butina.txt'.format(self.datapath), 'wb') as fileout:\n pickle.dump(self.PerformanceClusters,fileout) \n else:\n print('Clustering algorithm not implemented.')\n return\n \n \n def ClassificationCrossValidation(self, fraction_sample, N_sample):\n samplerange=np.arange(len(self.moldata_proj))\n invfrac=1/fraction_sample\n self.SampledSeries={}\n for i in range(N_sample):\n \n # random sampling\n random.seed((i+1)*10)\n molinds=random.sample(population=samplerange.tolist(),k=int(len(samplerange.tolist())//invfrac))\n moldata_sample=self.moldata_proj.iloc[molinds]\n distdata_sample=self.distdata_proj[molinds,:]\n distdata_sample=distdata_sample[:,molinds]\n \n # apply clustering and calculate MCS\n if self.clustering == 'UPGMA':\n MCSdict_sampled=UPGMAclustering.ApplyUPGMA(distdata_sample,moldata_sample,self.chembldb, self.flimit, self.MinClusterSize, self.calcScores)\n elif self.clustering == 'Butina':\n MCSdict_sampled=Butinaclustering.ApplyButina(distdata_sample,moldata_sample, self.chembldb, self.flimit, self.MinClusterSize,self.calcScores)\n else:\n print('Clustering algorithm not implemented.')\n return\n \n # assign series through substructure matching and filtering\n MolAssignment_sampled, MCSdict_sampled = self.AssignSeriesToMCS(MCSdict_sampled)\n self.SampledSeries[i]=MCSdict_sampled\n \n if self.clustering=='UPGMA':\n with open('{0}SampledSeries{1}_UPGMA.txt'.format(self.datapath, int(fraction_sample*100)), 'wb') as fileout:\n pickle.dump(self.SampledSeries,fileout)\n elif self.clustering == 'Butina':\n with open('{0}SampledSeries{1}_Butina.txt'.format(self.datapath, int(fraction_sample*100)), 'wb') as fileout:\n pickle.dump(self.SampledSeries,fileout) \n else:\n print('Clustering algorithm not
implemented.')\n return\n \n return\n \n def EvaluationCrossValidation(self):\n # Compare the classification obtained from sampling (\"SampledSeries\") against the original classification (\"MCSdict\")\n self.EvalCrossval=pd.DataFrame(columns=['series id','repetition','fscore'])\n for rep in self.SampledSeries.keys():\n rep_dict=self.SampledSeries[rep]\n keylist=[k for k in rep_dict.keys()]\n for k in self.MCSdict.keys():\n intersect=[len(set(self.MCSdict[k][-1])&set(v[-1])) for v in rep_dict.values()]\n recall=np.array([intersect[i]/len(rep_dict[keylist[i]][-1]) for i in range(len(keylist))])\n precision=np.array(intersect)/len(self.MCSdict[k][-1])\n fscore=max(2*recall*precision/(recall+precision+1e-9))\n row=[int(k),int(rep),fscore]\n self.EvalCrossval.loc[len(self.EvalCrossval)]=row\n self.EvalCrossval['series id']=self.EvalCrossval['series id'].apply(int)"
] | [
[
"pandas.DataFrame",
"numpy.array",
"numpy.argmax"
]
] |
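The `Classification` class above falls back to an in-memory RDKit `SubstructLibrary` when the `arthor` toolkit cannot be imported, and `AssignSeriesToMCS` then resolves each series MCS by substructure search. A minimal sketch of that fallback path, assuming only that RDKit is installed (the SMILES and the SMARTS query are hypothetical placeholders):

# Build an in-memory substructure library and query it with a SMARTS
# pattern, mirroring the non-arthor branch of the class above.
from rdkit import Chem
from rdkit.Chem import rdSubstructLibrary

smiles = ['c1ccccc1O', 'c1ccccc1N', 'CCO']   # hypothetical project structures
mols = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
fps = rdSubstructLibrary.PatternHolder()
for smi in smiles:
    m = Chem.MolFromSmiles(smi)
    mols.AddSmiles(Chem.MolToSmiles(m))      # store canonical SMILES
    fps.AddFingerprint(Chem.PatternFingerprint(m))
lib = rdSubstructLibrary.SubstructLibrary(mols, fps)

query = Chem.MolFromSmarts('c1ccccc1')       # stand-in for a series MCS
matches = list(lib.GetMatches(query, maxResults=len(smiles)))
print(matches)                               # indices of matching molecules, here [0, 1]

The pattern fingerprints let the library screen out most non-matching molecules cheaply before running full substructure matching, which is why the class stores a `PatternHolder` alongside the molecule holder.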
ZiyangTian/TrajectoryForecaster | [
"63ab3d7b97ec5dfbb1250af114038a6ad5a7faa9"
] | [
"forecaster/data/dataset_utils.py"
] | [
"\"\"\" Dataset utilities. \"\"\"\nimport functools\nimport tensorflow as tf\n\n\ndef named_dataset(dataset, names, num_parallel_calls=None):\n \"\"\"Create a `Dataset` by adding nested feature names to an existed `Dataset`.\n Arguments:\n dataset: A nested `tuple` structured `Dataset`.\n names: A nested `tuple` of `str`, feature names.\n num_parallel_calls: See `tf.data.Dataset.map`.\n Returns:\n A `dict` structured `Dataset`.\n \"\"\"\n def map_fn(_names, *features):\n features_dict = {}\n for name, feature in zip(_names, features):\n if type(name) is str:\n features_dict.update({name: feature})\n elif type(name) is tuple and type(feature) is tuple:\n features_dict.update(map_fn(name, *feature))\n else:\n raise ValueError('Unmatched feature names and values: {} {}.'.format(name, feature))\n return features_dict\n\n return dataset.map(functools.partial(map_fn, names), num_parallel_calls=num_parallel_calls)\n\n\ndef feature_selected_dataset(dataset, selected_feature_names, output_is_tuple=False, num_parallel_calls=None):\n \"\"\"Create a `Dataset` by selecting features from an existed `Dataset`.\n Arguments:\n dataset: A `dict` structured `Dataset`.\n selected_feature_names: A sequence of `str`, selected feature names.\n output_is_tuple: A `bool`, if true, return a `tuple` structured `Dataset`,\n or else, a `dict` structured one.\n num_parallel_calls: See `tf.data.Dataset.map`.\n Returns:\n A `Dataset`.\n \"\"\"\n def map_fn(features):\n if output_is_tuple:\n return tuple(map(\n lambda k: features[k],\n selected_feature_names))\n return dict(map(\n lambda k: (k, features[k]),\n selected_feature_names))\n\n return dataset.map(map_fn, num_parallel_calls=num_parallel_calls)\n\n\ndef windowed_dataset(dataset, size, shift=None, stride=1, drop_remainder=True):\n \"\"\"Create a windowed `Dataset`.\n Arguments:\n dataset: A `Dataset` of output shape ((...), (...), ... (...)) or a `dict`\n of the same.\n size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements\n of the input dataset to combine into a generate.\n shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the forward\n shift of the sliding generate in each iteration. Defaults to `size`.\n stride: A `tf.int64` scalar `tf.Tensor`, representing the stride of the\n input elements in the sliding generate.\n drop_remainder:\n Returns:\n A windowed `Dataset`.\n \"\"\"\n dataset = dataset.window(size, shift=shift, stride=stride, drop_remainder=drop_remainder)\n\n def map_fn(nested_structure_of_datasets):\n \"\"\"nested_structure_of_datasets -> dataset\"\"\"\n structure_type = type(nested_structure_of_datasets)\n if structure_type is dict:\n for k, v in nested_structure_of_datasets.items():\n nested_structure_of_datasets[k] = map_fn(v)\n return tf.data.Dataset.zip(nested_structure_of_datasets)\n if structure_type is tuple:\n return tf.data.Dataset.zip(tuple(map(map_fn, nested_structure_of_datasets)))\n return nested_structure_of_datasets.batch(size)\n\n if type(dataset.element_spec) is tuple:\n return dataset.flat_map(lambda *e: map_fn(e))\n return dataset.flat_map(map_fn)\n"
] | [
[
"tensorflow.data.Dataset.zip"
]
] |
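A short usage sketch for the `named_dataset`/`windowed_dataset` utilities above, assuming TensorFlow 2.x (the feature names and window parameters are illustrative). It exercises the same window-then-`zip`-and-`batch` pattern that `windowed_dataset`'s inner `map_fn` applies to a `dict`-structured dataset:

import tensorflow as tf

# Tuple-structured source dataset: (position, velocity) pairs.
ds = tf.data.Dataset.from_tensor_slices((tf.range(10), tf.range(10, 20)))

# What named_dataset produces: a dict-structured dataset.
named = ds.map(lambda p, v: {'pos': p, 'vel': v})

# What windowed_dataset does for the dict case: window the dataset, then
# zip-and-batch the inner datasets so every element becomes a dict of
# length-4 tensors (windows of 4 elements, shifted by 2).
windowed = named.window(4, shift=2, drop_remainder=True).flat_map(
    lambda w: tf.data.Dataset.zip({k: v.batch(4) for k, v in w.items()}))

for elem in windowed.take(2):
    print(elem['pos'].numpy(), elem['vel'].numpy())
    # [0 1 2 3] [10 11 12 13], then [2 3 4 5] [12 13 14 15]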
hanhtong/Effective-Instructions- | [
"a1766f300c4e613b4ce10e9b6eae1b14e43c7d60"
] | [
"instruction_env/Lib/site-packages/scipy/linalg/_decomp_qz.py"
] | [
"import warnings\n\nimport numpy as np\nfrom numpy import asarray_chkfinite\n\nfrom .misc import LinAlgError, _datacopied, LinAlgWarning\nfrom .lapack import get_lapack_funcs\n\n\n__all__ = ['qz', 'ordqz']\n\n_double_precision = ['i', 'l', 'd']\n\n\ndef _select_function(sort):\n if callable(sort):\n # assume the user knows what they're doing\n sfunction = sort\n elif sort == 'lhp':\n sfunction = _lhp\n elif sort == 'rhp':\n sfunction = _rhp\n elif sort == 'iuc':\n sfunction = _iuc\n elif sort == 'ouc':\n sfunction = _ouc\n else:\n raise ValueError(\"sort parameter must be None, a callable, or \"\n \"one of ('lhp','rhp','iuc','ouc')\")\n\n return sfunction\n\n\ndef _lhp(x, y):\n out = np.empty_like(x, dtype=bool)\n nonzero = (y != 0)\n # handles (x, y) = (0, 0) too\n out[~nonzero] = False\n out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0)\n return out\n\n\ndef _rhp(x, y):\n out = np.empty_like(x, dtype=bool)\n nonzero = (y != 0)\n # handles (x, y) = (0, 0) too\n out[~nonzero] = False\n out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0)\n return out\n\n\ndef _iuc(x, y):\n out = np.empty_like(x, dtype=bool)\n nonzero = (y != 0)\n # handles (x, y) = (0, 0) too\n out[~nonzero] = False\n out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0)\n return out\n\n\ndef _ouc(x, y):\n out = np.empty_like(x, dtype=bool)\n xzero = (x == 0)\n yzero = (y == 0)\n out[xzero & yzero] = False\n out[~xzero & yzero] = True\n out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0)\n return out\n\n\ndef _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,\n overwrite_b=False, check_finite=True):\n if sort is not None:\n # Disabled due to segfaults on win32, see ticket 1717.\n raise ValueError(\"The 'sort' input of qz() has to be None and will be \"\n \"removed in a future release. Use ordqz instead.\")\n\n if output not in ['real', 'complex', 'r', 'c']:\n raise ValueError(\"argument must be 'real', or 'complex'\")\n\n if check_finite:\n a1 = asarray_chkfinite(A)\n b1 = asarray_chkfinite(B)\n else:\n a1 = np.asarray(A)\n b1 = np.asarray(B)\n\n a_m, a_n = a1.shape\n b_m, b_n = b1.shape\n if not (a_m == a_n == b_m == b_n):\n raise ValueError(\"Array dimensions must be square and agree\")\n\n typa = a1.dtype.char\n if output in ['complex', 'c'] and typa not in ['F', 'D']:\n if typa in _double_precision:\n a1 = a1.astype('D')\n typa = 'D'\n else:\n a1 = a1.astype('F')\n typa = 'F'\n typb = b1.dtype.char\n if output in ['complex', 'c'] and typb not in ['F', 'D']:\n if typb in _double_precision:\n b1 = b1.astype('D')\n typb = 'D'\n else:\n b1 = b1.astype('F')\n typb = 'F'\n\n overwrite_a = overwrite_a or (_datacopied(a1, A))\n overwrite_b = overwrite_b or (_datacopied(b1, B))\n\n gges, = get_lapack_funcs(('gges',), (a1, b1))\n\n if lwork is None or lwork == -1:\n # get optimal work array size\n result = gges(lambda x: None, a1, b1, lwork=-1)\n lwork = result[-2][0].real.astype(np.int)\n\n sfunction = lambda x: None\n result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,\n overwrite_b=overwrite_b, sort_t=0)\n\n info = result[-1]\n if info < 0:\n raise ValueError(\"Illegal value in argument {} of gges\".format(-info))\n elif info > 0 and info <= a_n:\n warnings.warn(\"The QZ iteration failed. 
(a,b) are not in Schur \"\n \"form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be \"\n \"correct for J={},...,N\".format(info-1), LinAlgWarning,\n stacklevel=3)\n elif info == a_n+1:\n raise LinAlgError(\"Something other than QZ iteration failed\")\n elif info == a_n+2:\n raise LinAlgError(\"After reordering, roundoff changed values of some \"\n \"complex eigenvalues so that leading eigenvalues \"\n \"in the Generalized Schur form no longer satisfy \"\n \"sort=True. This could also be due to scaling.\")\n elif info == a_n+3:\n raise LinAlgError(\"Reordering failed in <s,d,c,z>tgsen\")\n\n return result, gges.typecode\n\n\ndef qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,\n overwrite_b=False, check_finite=True):\n \"\"\"\n QZ decomposition for generalized eigenvalues of a pair of matrices.\n\n The QZ, or generalized Schur, decomposition for a pair of N x N\n nonsymmetric matrices (A,B) is::\n\n (A,B) = (Q*AA*Z', Q*BB*Z')\n\n where AA, BB is in generalized Schur form if BB is upper-triangular\n with non-negative diagonal and AA is upper-triangular, or for real QZ\n decomposition (``output='real'``) block upper triangular with 1x1\n and 2x2 blocks. In this case, the 1x1 blocks correspond to real\n generalized eigenvalues and 2x2 blocks are 'standardized' by making\n the corresponding elements of BB have the form::\n\n [ a 0 ]\n [ 0 b ]\n\n and the pair of corresponding 2x2 blocks in AA and BB will have a complex\n conjugate pair of generalized eigenvalues. If (``output='complex'``) or\n A and B are complex matrices, Z' denotes the conjugate-transpose of Z.\n Q and Z are unitary matrices.\n\n Parameters\n ----------\n A : (N, N) array_like\n 2-D array to decompose\n B : (N, N) array_like\n 2-D array to decompose\n output : {'real', 'complex'}, optional\n Construct the real or complex QZ decomposition for real matrices.\n Default is 'real'.\n lwork : int, optional\n Work array size. If None or -1, it is automatically computed.\n sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional\n NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.\n\n Specifies whether the upper eigenvalues should be sorted. A callable\n may be passed that, given a eigenvalue, returns a boolean denoting\n whether the eigenvalue should be sorted to the top-left (True). For\n real matrix pairs, the sort function takes three real arguments\n (alphar, alphai, beta). The eigenvalue\n ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or\n output='complex', the sort function takes two complex arguments\n (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,\n string parameters may be used:\n\n - 'lhp' Left-hand plane (x.real < 0.0)\n - 'rhp' Right-hand plane (x.real > 0.0)\n - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)\n - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)\n\n Defaults to None (no sorting).\n overwrite_a : bool, optional\n Whether to overwrite data in a (may improve performance)\n overwrite_b : bool, optional\n Whether to overwrite data in b (may improve performance)\n check_finite : bool, optional\n If true checks the elements of `A` and `B` are finite numbers. If\n false does no checking and passes matrix through to\n underlying algorithm.\n\n Returns\n -------\n AA : (N, N) ndarray\n Generalized Schur form of A.\n BB : (N, N) ndarray\n Generalized Schur form of B.\n Q : (N, N) ndarray\n The left Schur vectors.\n Z : (N, N) ndarray\n The right Schur vectors.\n\n Notes\n -----\n Q is transposed versus the equivalent function in Matlab.\n\n .. 
versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import linalg\n >>> np.random.seed(1234)\n >>> A = np.arange(9).reshape((3, 3))\n >>> B = np.random.randn(3, 3)\n\n >>> AA, BB, Q, Z = linalg.qz(A, B)\n >>> AA\n array([[-13.40928183, -4.62471562, 1.09215523],\n [ 0. , 0. , 1.22805978],\n [ 0. , 0. , 0.31973817]])\n >>> BB\n array([[ 0.33362547, -1.37393632, 0.02179805],\n [ 0. , 1.68144922, 0.74683866],\n [ 0. , 0. , 0.9258294 ]])\n >>> Q\n array([[ 0.14134727, -0.97562773, 0.16784365],\n [ 0.49835904, -0.07636948, -0.86360059],\n [ 0.85537081, 0.20571399, 0.47541828]])\n >>> Z\n array([[-0.24900855, -0.51772687, 0.81850696],\n [-0.79813178, 0.58842606, 0.12938478],\n [-0.54861681, -0.6210585 , -0.55973739]])\n\n See also\n --------\n ordqz\n \"\"\"\n # output for real\n # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info\n # output for complex\n # AA, BB, sdim, alpha, beta, vsl, vsr, work, info\n result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,\n overwrite_a=overwrite_a, overwrite_b=overwrite_b,\n check_finite=check_finite)\n return result[0], result[1], result[-4], result[-3]\n\n\ndef ordqz(A, B, sort='lhp', output='real', overwrite_a=False,\n overwrite_b=False, check_finite=True):\n \"\"\"QZ decomposition for a pair of matrices with reordering.\n\n .. versionadded:: 0.17.0\n\n Parameters\n ----------\n A : (N, N) array_like\n 2-D array to decompose\n B : (N, N) array_like\n 2-D array to decompose\n sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional\n Specifies whether the upper eigenvalues should be sorted. A\n callable may be passed that, given an ordered pair ``(alpha,\n beta)`` representing the eigenvalue ``x = (alpha/beta)``,\n returns a boolean denoting whether the eigenvalue should be\n sorted to the top-left (True). For the real matrix pairs\n ``beta`` is real while ``alpha`` can be complex, and for\n complex matrix pairs both ``alpha`` and ``beta`` can be\n complex. The callable must be able to accept a NumPy\n array. Alternatively, string parameters may be used:\n\n - 'lhp' Left-hand plane (x.real < 0.0)\n - 'rhp' Right-hand plane (x.real > 0.0)\n - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)\n - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)\n\n With the predefined sorting functions, an infinite eigenvalue\n (i.e., ``alpha != 0`` and ``beta = 0``) is considered to lie in\n neither the left-hand nor the right-hand plane, but it is\n considered to lie outside the unit circle. For the eigenvalue\n ``(alpha, beta) = (0, 0)``, the predefined sorting functions\n all return `False`.\n output : str {'real','complex'}, optional\n Construct the real or complex QZ decomposition for real matrices.\n Default is 'real'.\n overwrite_a : bool, optional\n If True, the contents of A are overwritten.\n overwrite_b : bool, optional\n If True, the contents of B are overwritten.\n check_finite : bool, optional\n If true checks the elements of `A` and `B` are finite numbers. If\n false does no checking and passes matrix through to\n underlying algorithm.\n\n Returns\n -------\n AA : (N, N) ndarray\n Generalized Schur form of A.\n BB : (N, N) ndarray\n Generalized Schur form of B.\n alpha : (N,) ndarray\n alpha = alphar + alphai * 1j. See notes.\n beta : (N,) ndarray\n See notes.\n Q : (N, N) ndarray\n The left Schur vectors.\n Z : (N, N) ndarray\n The right Schur vectors.\n\n Notes\n -----\n On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the\n generalized eigenvalues. 
``ALPHAR(j) + ALPHAI(j)*i`` and\n ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)\n that would result if the 2-by-2 diagonal blocks of the real generalized\n Schur form of (A,B) were further reduced to triangular form using complex\n unitary transformations. If ALPHAI(j) is zero, then the jth eigenvalue is\n real; if positive, then the ``j``th and ``(j+1)``st eigenvalues are a\n complex conjugate pair, with ``ALPHAI(j+1)`` negative.\n\n See also\n --------\n qz\n\n Examples\n --------\n >>> from scipy.linalg import ordqz\n >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])\n >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]])\n >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')\n\n Since we have sorted for left half plane eigenvalues, negatives come first\n\n >>> (alpha/beta).real < 0\n array([ True, True, False, False], dtype=bool)\n\n \"\"\"\n # NOTE: should users be able to set these?\n lwork = None\n result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,\n overwrite_a=overwrite_a, overwrite_b=overwrite_b,\n check_finite=check_finite)\n AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]\n if typ not in 'cz':\n alpha, beta = result[3] + result[4]*1.j, result[5]\n else:\n alpha, beta = result[3], result[4]\n\n sfunction = _select_function(sort)\n select = sfunction(alpha, beta)\n\n tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))\n\n if lwork is None or lwork == -1:\n result = tgsen(select, AA, BB, Q, Z, lwork=-1)\n lwork = result[-3][0].real.astype(np.int)\n # looks like wrong value passed to ZTGSYL if not\n lwork += 1\n\n liwork = None\n if liwork is None or liwork == -1:\n result = tgsen(select, AA, BB, Q, Z, liwork=-1)\n liwork = result[-2][0]\n\n result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)\n\n info = result[-1]\n if info < 0:\n raise ValueError(\"Illegal value in argument %d of tgsen\" % -info)\n elif info == 1:\n raise ValueError(\"Reordering of (A, B) failed because the transformed\"\n \" matrix pair (A, B) would be too far from \"\n \"generalized Schur form; the problem is very \"\n \"ill-conditioned. (A, B) may have been partially \"\n \"reorded. If requested, 0 is returned in DIF(*), \"\n \"PL, and PR.\")\n\n # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,\n # work, iwork, info\n if typ in ['f', 'd']:\n alpha = result[2] + result[3] * 1.j\n return (result[0], result[1], alpha, result[4], result[5], result[6])\n # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,\n # iwork, info\n else:\n return result[0], result[1], result[2], result[3], result[4], result[5]\n"
] | [
[
"numpy.asarray_chkfinite",
"numpy.real",
"numpy.empty_like",
"numpy.asarray"
]
] |
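A small round-trip check for the `qz`/`ordqz` functions vendored above, assuming SciPy and NumPy are available (the random matrix pair is illustrative). For `output='real'` the factors Q and Z are real orthogonal matrices, so Z' is a plain transpose:

import numpy as np
from scipy.linalg import qz, ordqz

rng = np.random.default_rng(1234)
A = rng.standard_normal((4, 4))
B = rng.standard_normal((4, 4))

# qz returns the generalized Schur forms and the orthogonal factors;
# the decomposition satisfies A = Q @ AA @ Z.T and B = Q @ BB @ Z.T.
AA, BB, Q, Z = qz(A, B, output='real')
print(np.allclose(Q @ AA @ Z.T, A), np.allclose(Q @ BB @ Z.T, B))

# ordqz additionally returns (alpha, beta) and reorders the eigenvalues;
# with sort='lhp' the left-half-plane eigenvalues come first.
AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')
print((alpha / beta).real)   # generalized eigenvalues, negatives first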
FasrReactor/OpenBPS | [
"8b674ba810be36d863d261024330f271e6b31ed9"
] | [
"scripts/prepro.py"
] | [
"import re\nimport os\nimport sys\nimport numpy as np\nfrom pathlib import Path\n#\nMT = {18 : 'fission', 16 : '(n,2n)', 17 : '(n,3n)',\n 102 : '(n,gamma)', 103 : '(n,p)', 107 : '(n,a)'}\n\ndef parse_prepros(path, numproc=1):\n \"\"\"Parse prepro results\n\n Parametres:\n -----------\n path : str\n path to PREPRO calculation results\n numproc : int\n process number\n\n Returns:\n --------\n nuclidpath : iterable of str\n path to nuclides files\n nuclidnames : iterable of str\n name of nuclides\"\"\"\n nuclidpath = []\n nuclidnames = []\n for i in range(numproc):\n fg = [g for g in Path(os.path.join(path, str(i))).glob('*.tab')]\n nuclidpath = nuclidpath + fg\n for f in fg:\n nuclidnames.append(f.name.split(f.suffix))\n return nuclidpath, nuclidnames\n\n\ndef prepare_xs(path, numbergroup=1):\n \"\"\"Prepare the needed representation of cross-section data\n\n Paramteres:\n -----------\n path : str\n filename of cross-section data\n numbergroup : int\n number of energies neutron multigroup\n\n Returns:\n --------\n energies : iterable of str\n energy discritization by multigroups\n xslib : dict\n key : MT number, value cross-section values (str)\n \"\"\"\n def skip(ofile, number):\n for i in range(number):\n line = next(ofile)\n energies = np.zeros(numbergroup + 1)\n xslib = {}\n xs = []\n mtnum = ''\n with open(path,'r') as f:\n for line in f:\n res = re.search(\"MT=\\w*\\d+\", line)\n if res:\n mtnum = re.search(\"\\d+\", line).group()\n skip(f, 5)\n xs = np.zeros(numbergroup)\n while(len(line.rstrip()) > 1):\n dump = line.rstrip().split()\n num = 0\n en = 0.0\n x = 0.0\n for i, d in enumerate(dump):\n if (i % 3 == 0):\n num = int(d.rstrip())\n if (i % 3 == 1):\n en = float(d.rstrip())\n if (num < numbergroup + 2):\n if (energies[num - 1] == 0.0):\n energies[num - 1] = en\n if (i % 3 == 2):\n x = float(d.rstrip())\n if (num < numbergroup + 1):\n xs[num - 1] = x\n line = next(f)\n if (sum(xs) > 0):\n xslib[mtnum] = xs\n return energies, xslib\n\ndef create_xml(nuclidpath, nuclidnames, numbergroup, flux = None):\n \"\"\"Creating a xsmg.xml file with cross-section data\n\n Paramteres:\n -----------\n nuclidpath : iterable of str\n filename of cross-section data\n nuclidnames : iterable of str\n nuclide names\n numbergroup : int\n number of energies neutron multigroup\n flux : iterable of double (optional)\n weight function to collapse cross-section into reaction rates\n \"\"\"\n result = {}\n energies = None\n for p, n in zip(nuclidpath, nuclidnames):\n energies, result[n] = prepare_xs(p)\n from lxml import etree\n root = etree.Element(\"impxslib\")\n childe = etree.SubElement(root, \"energy\", ng=str(numbergroup))\n childe.text = ' '.join([\"{:10.4e}\".format(e) for e in energies[:]])\n childl = etree.SubElement(root, \"xslibs\", typex=\"xs\", ng=str(numbergroup))\n for k, v in result.items():\n for kk, vv in v.items():\n if (int(kk) in MT.keys()):\n childx = etree.SubElement(childl, \"xslib\", ng=str(numbergroup),\n reaction = MT[int(kk)], name = k)\n #childx.text = \"{:10.4e}\".format((interflux * v.sf).sum())\n childx.text = ' '.join([\"{:10.4e}\".format(e) for e in vv])\n tree=etree.ElementTree(root)\n tree.write('xsmg.xml', xml_declaration=True, encoding='utf-8',\n pretty_print=True)\n\nif __name__ == '__main__':\n numbergroup = int(sys.argv[-1])\n num = int(sys.argv[-2])\n gpath = sys.argv[-3]\n nuclidpath, nuclidnames = parse_prepros(gpath, num, numbergroup)\n create_xml(nuclidpath, nuclidnames, numbergroup)"
] | [
[
"numpy.zeros"
]
] |
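For orientation, a minimal sketch of the `xsmg.xml` layout that `create_xml` above writes, assuming lxml and NumPy are installed (the nuclide name, group structure, and cross-section value are made up for illustration):

import numpy as np
from lxml import etree

energies = np.array([1.0e-5, 2.0e7])            # hypothetical 1-group boundaries (eV)
xs = {'U235': {'fission': np.array([1.2345])}}  # made-up cross sections (barns)

# Same element/attribute layout as create_xml: an <energy> node with the
# group boundaries, then one <xslib> node per (nuclide, reaction) pair.
root = etree.Element("impxslib")
energy_node = etree.SubElement(root, "energy", ng="1")
energy_node.text = ' '.join("{:10.4e}".format(e) for e in energies)
libs = etree.SubElement(root, "xslibs", typex="xs", ng="1")
for name, reactions in xs.items():
    for reaction, values in reactions.items():
        node = etree.SubElement(libs, "xslib", ng="1", reaction=reaction, name=name)
        node.text = ' '.join("{:10.4e}".format(v) for v in values)
etree.ElementTree(root).write('xsmg.xml', xml_declaration=True,
                              encoding='utf-8', pretty_print=True)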
jhurt/audio | [
"16de2b5b791bacfa8fb65b6fec3062d2e71fd725"
] | [
"torchaudio/functional/filtering.py"
] | [
"import math\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nimport torchaudio._internal.fft\n\n\ndef _dB2Linear(x: float) -> float:\n return math.exp(x * math.log(10) / 20.0)\n\n\ndef _generate_wave_table(\n wave_type: str,\n data_type: str,\n table_size: int,\n min: float,\n max: float,\n phase: float,\n device: torch.device,\n) -> Tensor:\n r\"\"\"A helper fucntion for phaser. Generates a table with given parameters\n\n Args:\n wave_type (str): SINE or TRIANGULAR\n data_type (str): desired data_type ( `INT` or `FLOAT` )\n table_size (int): desired table size\n min (float): desired min value\n max (float): desired max value\n phase (float): desired phase\n device (torch.device): Torch device on which table must be generated\n Returns:\n Tensor: A 1D tensor with wave table values\n \"\"\"\n\n phase_offset = int(phase / math.pi / 2 * table_size + 0.5)\n\n t = torch.arange(table_size, device=device, dtype=torch.int32)\n\n point = (t + phase_offset) % table_size\n\n d = torch.zeros_like(point, device=device, dtype=torch.float64)\n\n if wave_type == \"SINE\":\n d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2\n elif wave_type == \"TRIANGLE\":\n d = point.to(torch.float64) * 2 / table_size\n value = 4 * point // table_size\n d[value == 0] = d[value == 0] + 0.5\n d[value == 1] = 1.5 - d[value == 1]\n d[value == 2] = 1.5 - d[value == 2]\n d[value == 3] = d[value == 3] - 1.5\n\n d = d * (max - min) + min\n\n if data_type == \"INT\":\n mask = d < 0\n d[mask] = d[mask] - 0.5\n d[~mask] = d[~mask] + 0.5\n d = d.to(torch.int32)\n elif data_type == \"FLOAT\":\n d = d.to(torch.float32)\n\n return d\n\n\ndef allpass_biquad(\n waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707\n) -> Tensor:\n r\"\"\"Design two-pole all-pass filter. Similar to SoX implementation.\n\n Args:\n waveform(torch.Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n central_freq (float): central frequency (in Hz)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n\n b0 = 1 - alpha\n b1 = -2 * math.cos(w0)\n b2 = 1 + alpha\n a0 = 1 + alpha\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef band_biquad(\n waveform: Tensor,\n sample_rate: int,\n central_freq: float,\n Q: float = 0.707,\n noise: bool = False,\n) -> Tensor:\n r\"\"\"Design two-pole band filter. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n central_freq (float): central frequency (in Hz)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).\n noise (bool, optional) : If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion).\n If ``False``, uses mode oriented to pitched audio, i.e. 
voice, singing,\n or instrumental music (Default: ``False``).\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n bw_Hz = central_freq / Q\n\n a0 = 1.0\n a2 = math.exp(-2 * math.pi * bw_Hz / sample_rate)\n a1 = -4 * a2 / (1 + a2) * math.cos(w0)\n\n b0 = math.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2)\n\n if noise:\n mult = math.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0\n b0 *= mult\n\n b1 = 0.0\n b2 = 0.0\n\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef bandpass_biquad(\n waveform: Tensor,\n sample_rate: int,\n central_freq: float,\n Q: float = 0.707,\n const_skirt_gain: bool = False,\n) -> Tensor:\n r\"\"\"Design two-pole band-pass filter. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n central_freq (float): central frequency (in Hz)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n const_skirt_gain (bool, optional) : If ``True``, uses a constant skirt gain (peak gain = Q).\n If ``False``, uses a constant 0dB peak gain. (Default: ``False``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n\n temp = math.sin(w0) / 2 if const_skirt_gain else alpha\n b0 = temp\n b1 = 0.0\n b2 = -temp\n a0 = 1 + alpha\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef bandreject_biquad(\n waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707\n) -> Tensor:\n r\"\"\"Design two-pole band-reject filter. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n central_freq (float): central frequency (in Hz)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n\n b0 = 1.0\n b1 = -2 * math.cos(w0)\n b2 = 1.0\n a0 = 1 + alpha\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef bass_biquad(\n waveform: Tensor,\n sample_rate: int,\n gain: float,\n central_freq: float = 100,\n Q: float = 0.707,\n) -> Tensor:\n r\"\"\"Design a bass tone-control effect. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n gain (float): desired gain at the boost (or attenuation) in dB.\n central_freq (float, optional): central frequency (in Hz). 
(Default: ``100``)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n A = math.exp(gain / 40 * math.log(10))\n\n temp1 = 2 * math.sqrt(A) * alpha\n temp2 = (A - 1) * math.cos(w0)\n temp3 = (A + 1) * math.cos(w0)\n\n b0 = A * ((A + 1) - temp2 + temp1)\n b1 = 2 * A * ((A - 1) - temp3)\n b2 = A * ((A + 1) - temp2 - temp1)\n a0 = (A + 1) + temp2 + temp1\n a1 = -2 * ((A - 1) + temp3)\n a2 = (A + 1) + temp2 - temp1\n\n return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0)\n\n\ndef biquad(\n waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float\n) -> Tensor:\n r\"\"\"Perform a biquad filter of input tensor. Initial conditions set to 0.\n https://en.wikipedia.org/wiki/Digital_biquad_filter\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n b0 (float): numerator coefficient of current input, x[n]\n b1 (float): numerator coefficient of input one time step ago x[n-1]\n b2 (float): numerator coefficient of input two time steps ago x[n-2]\n a0 (float): denominator coefficient of current output y[n], typically 1\n a1 (float): denominator coefficient of current output y[n-1]\n a2 (float): denominator coefficient of current output y[n-2]\n\n Returns:\n Tensor: Waveform with dimension of `(..., time)`\n \"\"\"\n\n device = waveform.device\n dtype = waveform.dtype\n\n output_waveform = lfilter(\n waveform,\n torch.tensor([a0, a1, a2], dtype=dtype, device=device),\n torch.tensor([b0, b1, b2], dtype=dtype, device=device),\n )\n return output_waveform\n\n\ndef contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor:\n r\"\"\"Apply contrast effect. Similar to SoX implementation.\n Comparable with compression, this effect modifies an audio signal to make it sound louder\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n enhancement_amount (float): controls the amount of the enhancement\n Allowed range of values for enhancement_amount : 0-100\n Note that enhancement_amount = 0 still gives a significant contrast enhancement\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n \"\"\"\n\n if not 0 <= enhancement_amount <= 100:\n raise ValueError(\"Allowed range of values for enhancement_amount : 0-100\")\n\n contrast = enhancement_amount / 750.0\n\n temp1 = waveform * (math.pi / 2)\n temp2 = contrast * torch.sin(temp1 * 4)\n output_waveform = torch.sin(temp1 + temp2)\n\n return output_waveform\n\n\ndef dcshift(\n waveform: Tensor, shift: float, limiter_gain: Optional[float] = None\n) -> Tensor:\n r\"\"\"Apply a DC shift to the audio. Similar to SoX implementation.\n This can be useful to remove a DC offset\n (caused perhaps by a hardware problem in the recording chain) from the audio\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n shift (float): indicates the amount to shift the audio\n Allowed range of values for shift : -2.0 to +2.0\n limiter_gain (float): It is used only on peaks to prevent clipping\n It should have a value much less than 1 (e.g. 
0.05 or 0.02)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n \"\"\"\n output_waveform = waveform\n limiter_threshold = 0.0\n\n if limiter_gain is not None:\n limiter_threshold = 1.0 - (abs(shift) - limiter_gain)\n\n if limiter_gain is not None and shift > 0:\n mask = waveform > limiter_threshold\n temp = (\n (waveform[mask] - limiter_threshold)\n * limiter_gain\n / (1 - limiter_threshold)\n )\n output_waveform[mask] = (temp + limiter_threshold + shift).clamp(\n max=limiter_threshold\n )\n output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)\n elif limiter_gain is not None and shift < 0:\n mask = waveform < -limiter_threshold\n temp = (\n (waveform[mask] + limiter_threshold)\n * limiter_gain\n / (1 - limiter_threshold)\n )\n output_waveform[mask] = (temp - limiter_threshold + shift).clamp(\n min=-limiter_threshold\n )\n output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)\n else:\n output_waveform = (waveform + shift).clamp(min=-1, max=1)\n\n return output_waveform\n\n\ndef deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor:\n r\"\"\"Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, Allowed sample rate ``44100`` or ``48000``\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n\n if sample_rate == 44100:\n central_freq = 5283\n width_slope = 0.4845\n gain = -9.477\n elif sample_rate == 48000:\n central_freq = 5356\n width_slope = 0.479\n gain = -9.62\n else:\n raise ValueError(\"Sample rate must be 44100 (audio-CD) or 48000 (DAT)\")\n\n w0 = 2 * math.pi * central_freq / sample_rate\n A = math.exp(gain / 40.0 * math.log(10))\n alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2)\n\n temp1 = 2 * math.sqrt(A) * alpha\n temp2 = (A - 1) * math.cos(w0)\n temp3 = (A + 1) * math.cos(w0)\n\n b0 = A * ((A + 1) + temp2 + temp1)\n b1 = -2 * A * ((A - 1) + temp3)\n b2 = A * ((A + 1) + temp2 - temp1)\n a0 = (A + 1) - temp2 + temp1\n a1 = 2 * ((A - 1) - temp3)\n a2 = (A + 1) - temp2 - temp1\n\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor:\n r\"\"\"Noise shaping is calculated by error:\n error[n] = dithered[n] - original[n]\n noise_shaped_waveform[n] = dithered[n] + error[n-1]\n \"\"\"\n wf_shape = waveform.size()\n waveform = waveform.reshape(-1, wf_shape[-1])\n\n dithered_shape = dithered_waveform.size()\n dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1])\n\n error = dithered_waveform - waveform\n\n # add error[n-1] to dithered_waveform[n], so offset the error by 1 index\n zeros = torch.zeros(1, dtype=error.dtype, device=error.device)\n for index in range(error.size()[0]):\n err = error[index]\n error_offset = torch.cat((zeros, err))\n error[index] = error_offset[: waveform.size()[1]]\n\n noise_shaped = dithered_waveform + error\n return noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:])\n\n\ndef _apply_probability_distribution(\n waveform: Tensor, density_function: str = \"TPDF\"\n) -> Tensor:\n r\"\"\"Apply a probability distribution function on a waveform.\n\n Triangular probability density function (TPDF) dither noise has a\n triangular distribution; 
values in the center of the range have a higher\n probability of occurring.\n\n Rectangular probability density function (RPDF) dither noise has a\n uniform distribution; any value in the specified range has the same\n probability of occurring.\n\n Gaussian probability density function (GPDF) has a normal distribution.\n The relationship of probabilities of results follows a bell-shaped,\n or Gaussian curve, typical of dither generated by analog sources.\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time)\n density_function (str, optional): The density function of a\n continuous random variable (Default: ``\"TPDF\"``)\n Options: Triangular Probability Density Function - `TPDF`\n Rectangular Probability Density Function - `RPDF`\n Gaussian Probability Density Function - `GPDF`\n Returns:\n Tensor: waveform dithered with TPDF\n \"\"\"\n\n # pack batch\n shape = waveform.size()\n waveform = waveform.reshape(-1, shape[-1])\n\n channel_size = waveform.size()[0] - 1\n time_size = waveform.size()[-1] - 1\n\n random_channel = (\n int(\n torch.randint(\n channel_size,\n [\n 1,\n ],\n ).item()\n )\n if channel_size > 0\n else 0\n )\n random_time = (\n int(\n torch.randint(\n time_size,\n [\n 1,\n ],\n ).item()\n )\n if time_size > 0\n else 0\n )\n\n number_of_bits = 16\n up_scaling = 2 ** (number_of_bits - 1) - 2\n signal_scaled = waveform * up_scaling\n down_scaling = 2 ** (number_of_bits - 1)\n\n signal_scaled_dis = waveform\n if density_function == \"RPDF\":\n RPDF = waveform[random_channel][random_time] - 0.5\n\n signal_scaled_dis = signal_scaled + RPDF\n elif density_function == \"GPDF\":\n # TODO Replace by distribution code once\n # https://github.com/pytorch/pytorch/issues/29843 is resolved\n # gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample()\n\n num_rand_variables = 6\n\n gaussian = waveform[random_channel][random_time]\n for ws in num_rand_variables * [time_size]:\n rand_chan = int(\n torch.randint(\n channel_size,\n [\n 1,\n ],\n ).item()\n )\n gaussian += waveform[rand_chan][\n int(\n torch.randint(\n ws,\n [\n 1,\n ],\n ).item()\n )\n ]\n\n signal_scaled_dis = signal_scaled + gaussian\n else:\n # dtype needed for https://github.com/pytorch/pytorch/issues/32358\n TPDF = torch.bartlett_window(\n time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device\n )\n TPDF = TPDF.repeat((channel_size + 1), 1)\n signal_scaled_dis = signal_scaled + TPDF\n\n quantised_signal_scaled = torch.round(signal_scaled_dis)\n quantised_signal = quantised_signal_scaled / down_scaling\n\n # unpack batch\n return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:])\n\n\ndef dither(\n waveform: Tensor, density_function: str = \"TPDF\", noise_shaping: bool = False\n) -> Tensor:\n r\"\"\"Dither increases the perceived dynamic range of audio stored at a\n particular bit-depth by eliminating nonlinear truncation distortion\n (i.e. adding minimally perceived noise to mask distortion caused by quantization).\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time)\n density_function (str, optional):\n The density function of a continuous random variable. 
One of\n ``\"TPDF\"`` (Triangular Probability Density Function),\n ``\"RPDF\"`` (Rectangular Probability Density Function) or\n ``\"GPDF\"`` (Gaussian Probability Density Function) (Default: ``\"TPDF\"``).\n noise_shaping (bool, optional): a filtering process that shapes the spectral\n energy of quantisation error (Default: ``False``)\n\n Returns:\n Tensor: waveform dithered\n \"\"\"\n dithered = _apply_probability_distribution(\n waveform, density_function=density_function\n )\n\n if noise_shaping:\n return _add_noise_shaping(dithered, waveform)\n else:\n return dithered\n\n\ndef equalizer_biquad(\n waveform: Tensor,\n sample_rate: int,\n center_freq: float,\n gain: float,\n Q: float = 0.707,\n) -> Tensor:\n r\"\"\"Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n center_freq (float): filter's central frequency\n gain (float): desired gain at the boost (or attenuation) in dB\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n \"\"\"\n w0 = 2 * math.pi * center_freq / sample_rate\n A = math.exp(gain / 40.0 * math.log(10))\n alpha = math.sin(w0) / 2 / Q\n\n b0 = 1 + alpha * A\n b1 = -2 * math.cos(w0)\n b2 = 1 - alpha * A\n a0 = 1 + alpha / A\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha / A\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef flanger(\n waveform: Tensor,\n sample_rate: int,\n delay: float = 0.0,\n depth: float = 2.0,\n regen: float = 0.0,\n width: float = 71.0,\n speed: float = 0.5,\n phase: float = 25.0,\n modulation: str = \"sinusoidal\",\n interpolation: str = \"linear\",\n) -> Tensor:\n r\"\"\"Apply a flanger effect to the audio. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., channel, time)` .\n Max 4 channels allowed\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n delay (float): desired delay in milliseconds(ms)\n Allowed range of values are 0 to 30\n depth (float): desired delay depth in milliseconds(ms)\n Allowed range of values are 0 to 10\n regen (float): desired regen(feeback gain) in dB\n Allowed range of values are -95 to 95\n width (float): desired width(delay gain) in dB\n Allowed range of values are 0 to 100\n speed (float): modulation speed in Hz\n Allowed range of values are 0.1 to 10\n phase (float): percentage phase-shift for multi-channel\n Allowed range of values are 0 to 100\n modulation (str): Use either \"sinusoidal\" or \"triangular\" modulation. (Default: ``sinusoidal``)\n interpolation (str): Use either \"linear\" or \"quadratic\" for delay-line interpolation. 
(Default: ``linear``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., channel, time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n\n Scott Lehman, Effects Explained,\n https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html\n \"\"\"\n\n if modulation not in (\"sinusoidal\", \"triangular\"):\n raise ValueError(\"Only 'sinusoidal' or 'triangular' modulation allowed\")\n\n if interpolation not in (\"linear\", \"quadratic\"):\n raise ValueError(\"Only 'linear' or 'quadratic' interpolation allowed\")\n\n actual_shape = waveform.shape\n device, dtype = waveform.device, waveform.dtype\n\n if actual_shape[-2] > 4:\n raise ValueError(\"Max 4 channels allowed\")\n\n # convert to 3D (batch, channels, time)\n waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1])\n\n # Scaling\n feedback_gain = regen / 100\n delay_gain = width / 100\n channel_phase = phase / 100\n delay_min = delay / 1000\n delay_depth = depth / 1000\n\n n_channels = waveform.shape[-2]\n\n if modulation == \"sinusoidal\":\n wave_type = \"SINE\"\n else:\n wave_type = \"TRIANGLE\"\n\n # Balance output:\n in_gain = 1.0 / (1 + delay_gain)\n delay_gain = delay_gain / (1 + delay_gain)\n\n # Balance feedback loop:\n delay_gain = delay_gain * (1 - abs(feedback_gain))\n\n delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5)\n delay_buf_length = delay_buf_length + 2\n\n delay_bufs = torch.zeros(\n waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device\n )\n delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device)\n\n lfo_length = int(sample_rate / speed)\n\n table_min = math.floor(delay_min * sample_rate + 0.5)\n table_max = delay_buf_length - 2.0\n\n lfo = _generate_wave_table(\n wave_type=wave_type,\n data_type=\"FLOAT\",\n table_size=lfo_length,\n min=float(table_min),\n max=float(table_max),\n phase=3 * math.pi / 2,\n device=device,\n )\n\n output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)\n\n delay_buf_pos = 0\n lfo_pos = 0\n channel_idxs = torch.arange(0, n_channels, device=device)\n\n for i in range(waveform.shape[-1]):\n\n delay_buf_pos = (delay_buf_pos + delay_buf_length - 1) % delay_buf_length\n\n cur_channel_phase = (channel_idxs * lfo_length * channel_phase + 0.5).to(\n torch.int64\n )\n delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length]\n frac_delay = torch.frac(delay_tensor)\n delay_tensor = torch.floor(delay_tensor)\n\n int_delay = delay_tensor.to(torch.int64)\n\n temp = waveform[:, :, i]\n\n delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain\n\n delayed_0 = delay_bufs[\n :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length\n ]\n\n int_delay = int_delay + 1\n\n delayed_1 = delay_bufs[\n :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length\n ]\n\n int_delay = int_delay + 1\n\n if interpolation == \"linear\":\n delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay\n else:\n delayed_2 = delay_bufs[\n :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length\n ]\n\n int_delay = int_delay + 1\n\n delayed_2 = delayed_2 - delayed_0\n delayed_1 = delayed_1 - delayed_0\n a = delayed_2 * 0.5 - delayed_1\n b = delayed_1 * 2 - delayed_2 * 0.5\n\n delayed = delayed_0 + (a * frac_delay + b) * frac_delay\n\n delay_last = delayed\n output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain\n\n lfo_pos = (lfo_pos + 1) % lfo_length\n\n return output_waveform.clamp(min=-1, max=1).view(actual_shape)\n\n\ndef 
gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor:\n r\"\"\"Apply amplification or attenuation to the whole waveform.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time).\n gain_db (float, optional) Gain adjustment in decibels (dB) (Default: ``1.0``).\n\n Returns:\n Tensor: the whole waveform amplified by gain_db.\n \"\"\"\n if gain_db == 0:\n return waveform\n\n ratio = 10 ** (gain_db / 20)\n\n return waveform * ratio\n\n\ndef highpass_biquad(\n waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707\n) -> Tensor:\n r\"\"\"Design biquad highpass filter and perform filtering. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n cutoff_freq (float): filter cutoff frequency\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n\n Returns:\n Tensor: Waveform dimension of `(..., time)`\n \"\"\"\n w0 = 2 * math.pi * cutoff_freq / sample_rate\n alpha = math.sin(w0) / 2.0 / Q\n\n b0 = (1 + math.cos(w0)) / 2\n b1 = -1 - math.cos(w0)\n b2 = b0\n a0 = 1 + alpha\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):\n n_order = a_coeffs_flipped.size(0)\n for i_sample, o0 in enumerate(input_signal_windows.t()):\n windowed_output_signal = padded_output_waveform[\n :, i_sample:i_sample + n_order\n ]\n o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)\n padded_output_waveform[:, i_sample + n_order - 1] = o0\n\n\ntry:\n _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop\nexcept RuntimeError as err:\n assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'\n _lfilter_core_cpu_loop = _lfilter_core_generic_loop\n\n\ndef _lfilter_core(\n waveform: Tensor,\n a_coeffs: Tensor,\n b_coeffs: Tensor,\n) -> Tensor:\n\n assert a_coeffs.size(0) == b_coeffs.size(0)\n assert len(waveform.size()) == 2\n assert waveform.device == a_coeffs.device\n assert b_coeffs.device == a_coeffs.device\n\n n_channel, n_sample = waveform.size()\n n_order = a_coeffs.size(0)\n assert n_order > 0\n\n # Pad the input and create output\n\n padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0])\n padded_output_waveform = torch.zeros_like(padded_waveform)\n\n # Set up the coefficients matrix\n # Flip coefficients' order\n a_coeffs_flipped = a_coeffs.flip(0)\n b_coeffs_flipped = b_coeffs.flip(0)\n\n # calculate windowed_input_signal in parallel using convolution\n input_signal_windows = torch.nn.functional.conv1d(\n padded_waveform.unsqueeze(1),\n b_coeffs_flipped.view(1, 1, -1)\n ).squeeze(1)\n\n input_signal_windows.div_(a_coeffs[0])\n a_coeffs_flipped.div_(a_coeffs[0])\n\n if input_signal_windows.device == torch.device('cpu') and\\\n a_coeffs_flipped.device == torch.device('cpu') and\\\n padded_output_waveform.device == torch.device('cpu'):\n _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)\n else:\n _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)\n\n output = padded_output_waveform[:, n_order - 1:]\n return output\n\ntry:\n _lfilter = torch.ops.torchaudio._lfilter\nexcept RuntimeError as err:\n assert str(err) == 'No such operator torchaudio::_lfilter'\n _lfilter = _lfilter_core\n\n\ndef lfilter(\n waveform: Tensor,\n a_coeffs: Tensor,\n b_coeffs: Tensor,\n 
clamp: bool = True,\n) -> Tensor:\n r\"\"\"Perform an IIR filter by evaluating difference equation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of ``(..., time)``. Must be normalized to -1 to 1.\n a_coeffs (Tensor): denominator coefficients of difference equation of dimension of ``(n_order + 1)``.\n Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``.\n Must be same size as b_coeffs (pad with 0's as necessary).\n b_coeffs (Tensor): numerator coefficients of difference equation of dimension of ``(n_order + 1)``.\n Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``.\n Must be same size as a_coeffs (pad with 0's as necessary).\n clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)\n\n Returns:\n Tensor: Waveform with dimension of ``(..., time)``.\n \"\"\"\n # pack batch\n shape = waveform.size()\n waveform = waveform.reshape(-1, shape[-1])\n\n output = _lfilter(waveform, a_coeffs, b_coeffs)\n\n if clamp:\n output = torch.clamp(output, min=-1.0, max=1.0)\n\n # unpack batch\n output = output.reshape(shape[:-1] + output.shape[-1:])\n\n return output\n\n\ndef lowpass_biquad(\n waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707\n) -> Tensor:\n r\"\"\"Design biquad lowpass filter and perform filtering. Similar to SoX implementation.\n\n Args:\n waveform (torch.Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n cutoff_freq (float): filter cutoff frequency\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n \"\"\"\n w0 = 2 * math.pi * cutoff_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n\n b0 = (1 - math.cos(w0)) / 2\n b1 = 1 - math.cos(w0)\n b2 = b0\n a0 = 1 + alpha\n a1 = -2 * math.cos(w0)\n a2 = 1 - alpha\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef _overdrive_core_loop_generic(\n waveform: Tensor,\n temp: Tensor,\n last_in: Tensor,\n last_out: Tensor,\n output_waveform: Tensor\n):\n for i in range(waveform.shape[-1]):\n last_out = temp[:, i] - last_in + 0.995 * last_out\n last_in = temp[:, i]\n output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75\n\n\ntry:\n _overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop\nexcept RuntimeError as err:\n assert str(err) == 'No such operator torchaudio::_overdrive_core_loop'\n _overdrive_core_loop_cpu = _overdrive_core_loop_generic\n\n\ndef overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor:\n r\"\"\"Apply a overdrive effect to the audio. 
Similar to SoX implementation.\n This effect applies a non linear distortion to the audio signal.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n gain (float): desired gain at the boost (or attenuation) in dB\n Allowed range of values are 0 to 100\n colour (float): controls the amount of even harmonic content in the over-driven output\n Allowed range of values are 0 to 100\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n \"\"\"\n actual_shape = waveform.shape\n device, dtype = waveform.device, waveform.dtype\n\n # convert to 2D (..,time)\n waveform = waveform.view(-1, actual_shape[-1])\n\n gain = _dB2Linear(gain)\n colour = colour / 200\n last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)\n last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)\n\n temp = waveform * gain + colour\n\n mask1 = temp < -1\n temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device)\n # Wrapping the constant with Tensor is required for Torchscript\n\n mask2 = temp > 1\n temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device)\n\n mask3 = ~mask1 & ~mask2\n temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3)\n\n output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)\n\n # Uses CPU optimized loop function if available for CPU device\n if device == torch.device('cpu'):\n _overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform)\n else:\n _overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform)\n\n return output_waveform.clamp(min=-1, max=1).view(actual_shape)\n\n\ndef phaser(\n waveform: Tensor,\n sample_rate: int,\n gain_in: float = 0.4,\n gain_out: float = 0.74,\n delay_ms: float = 3.0,\n decay: float = 0.4,\n mod_speed: float = 0.5,\n sinusoidal: bool = True,\n) -> Tensor:\n r\"\"\"Apply a phasing effect to the audio. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz)\n gain_in (float): desired input gain at the boost (or attenuation) in dB\n Allowed range of values are 0 to 1\n gain_out (float): desired output gain at the boost (or attenuation) in dB\n Allowed range of values are 0 to 1e9\n delay_ms (float): desired delay in milli seconds\n Allowed range of values are 0 to 5.0\n decay (float): desired decay relative to gain-in\n Allowed range of values are 0 to 0.99\n mod_speed (float): modulation speed in Hz\n Allowed range of values are 0.1 to 2\n sinusoidal (bool): If ``True``, uses sinusoidal modulation (preferable for multiple instruments)\n If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect)\n (Default: ``True``)\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n Scott Lehman, Effects Explained,\n https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html\n \"\"\"\n actual_shape = waveform.shape\n device, dtype = waveform.device, waveform.dtype\n\n # convert to 2D (channels,time)\n waveform = waveform.view(-1, actual_shape[-1])\n\n delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5)\n delay_buf = torch.zeros(\n waveform.shape[0], delay_buf_len, dtype=dtype, device=device\n )\n\n mod_buf_len = int(sample_rate / mod_speed + 0.5)\n\n if sinusoidal:\n wave_type = \"SINE\"\n else:\n wave_type = \"TRIANGLE\"\n\n mod_buf = _generate_wave_table(\n wave_type=wave_type,\n data_type=\"INT\",\n table_size=mod_buf_len,\n min=1.0,\n max=float(delay_buf_len),\n phase=math.pi / 2,\n device=device,\n )\n\n delay_pos = 0\n mod_pos = 0\n\n output_waveform_pre_gain_list = []\n waveform = waveform * gain_in\n delay_buf = delay_buf * decay\n waveform_list = [waveform[:, i] for i in range(waveform.size(1))]\n delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))]\n mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))]\n\n for i in range(waveform.shape[-1]):\n idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len)\n mod_pos = (mod_pos + 1) % mod_buf_len\n delay_pos = (delay_pos + 1) % delay_buf_len\n temp = (waveform_list[i]) + (delay_buf_list[idx])\n delay_buf_list[delay_pos] = temp * decay\n output_waveform_pre_gain_list.append(temp)\n\n output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to(\n dtype=dtype, device=device\n )\n output_waveform.mul_(gain_out)\n\n return output_waveform.clamp(min=-1, max=1).view(actual_shape)\n\n\ndef riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor:\n r\"\"\"Apply RIAA vinyl playback equalisation. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz).\n Allowed sample rates in Hz : ``44100``,``48000``,``88200``,``96000``\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n\n if sample_rate == 44100:\n zeros = [-0.2014898, 0.9233820]\n poles = [0.7083149, 0.9924091]\n\n elif sample_rate == 48000:\n zeros = [-0.1766069, 0.9321590]\n poles = [0.7396325, 0.9931330]\n\n elif sample_rate == 88200:\n zeros = [-0.1168735, 0.9648312]\n poles = [0.8590646, 0.9964002]\n\n elif sample_rate == 96000:\n zeros = [-0.1141486, 0.9676817]\n poles = [0.8699137, 0.9966946]\n\n else:\n raise ValueError(\"Sample rate must be 44.1k, 48k, 88.2k, or 96k\")\n\n # polynomial coefficients with roots zeros[0] and zeros[1]\n b0 = 1.0\n b1 = -(zeros[0] + zeros[1])\n b2 = zeros[0] * zeros[1]\n\n # polynomial coefficients with roots poles[0] and poles[1]\n a0 = 1.0\n a1 = -(poles[0] + poles[1])\n a2 = poles[0] * poles[1]\n\n # Normalise to 0dB at 1kHz\n y = 2 * math.pi * 1000 / sample_rate\n b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y)\n a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y)\n b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y)\n a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y)\n g = 1 / math.sqrt((b_re ** 2 + b_im ** 2) / (a_re ** 2 + a_im ** 2))\n\n b0 *= g\n b1 *= g\n b2 *= g\n\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef treble_biquad(\n waveform: Tensor,\n sample_rate: int,\n gain: float,\n central_freq: float = 3000,\n Q: float = 0.707,\n) -> Tensor:\n r\"\"\"Design a treble tone-control effect. Similar to SoX implementation.\n\n Args:\n waveform (Tensor): audio waveform of dimension of `(..., time)`\n sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)\n gain (float): desired gain at the boost (or attenuation) in dB.\n central_freq (float, optional): central frequency (in Hz). 
(Default: ``3000``)\n Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).\n\n Returns:\n Tensor: Waveform of dimension of `(..., time)`\n\n References:\n http://sox.sourceforge.net/sox.html\n https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF\n \"\"\"\n w0 = 2 * math.pi * central_freq / sample_rate\n alpha = math.sin(w0) / 2 / Q\n A = math.exp(gain / 40 * math.log(10))\n\n temp1 = 2 * math.sqrt(A) * alpha\n temp2 = (A - 1) * math.cos(w0)\n temp3 = (A + 1) * math.cos(w0)\n\n b0 = A * ((A + 1) + temp2 + temp1)\n b1 = -2 * A * ((A - 1) + temp3)\n b2 = A * ((A + 1) + temp2 - temp1)\n a0 = (A + 1) - temp2 + temp1\n a1 = 2 * ((A - 1) - temp3)\n a2 = (A + 1) - temp2 - temp1\n\n return biquad(waveform, b0, b1, b2, a0, a1, a2)\n\n\ndef _measure(\n measure_len_ws: int,\n samples: Tensor,\n spectrum: Tensor,\n noise_spectrum: Tensor,\n spectrum_window: Tensor,\n spectrum_start: int,\n spectrum_end: int,\n cepstrum_window: Tensor,\n cepstrum_start: int,\n cepstrum_end: int,\n noise_reduction_amount: float,\n measure_smooth_time_mult: float,\n noise_up_time_mult: float,\n noise_down_time_mult: float,\n index_ns: int,\n boot_count: int,\n) -> float:\n\n assert spectrum.size()[-1] == noise_spectrum.size()[-1]\n\n samplesLen_ns = samples.size()[-1]\n dft_len_ws = spectrum.size()[-1]\n\n dftBuf = torch.zeros(dft_len_ws)\n\n _index_ns = torch.tensor(\n [index_ns] + [(index_ns + i) % samplesLen_ns for i in range(1, measure_len_ws)]\n )\n dftBuf[:measure_len_ws] = samples[_index_ns] * spectrum_window[:measure_len_ws]\n\n # memset(c->dftBuf + i, 0, (p->dft_len_ws - i) * sizeof(*c->dftBuf));\n dftBuf[measure_len_ws:dft_len_ws].zero_()\n\n # lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf);\n _dftBuf = torchaudio._internal.fft.rfft(dftBuf)\n\n # memset(c->dftBuf, 0, p->spectrum_start * sizeof(*c->dftBuf));\n _dftBuf[:spectrum_start].zero_()\n\n mult: float = (\n boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult\n )\n\n _d = _dftBuf[spectrum_start:spectrum_end].abs()\n spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult))\n _d = spectrum[spectrum_start:spectrum_end] ** 2\n\n _zeros = torch.zeros(spectrum_end - spectrum_start)\n _mult = (\n _zeros\n if boot_count >= 0\n else torch.where(\n _d > noise_spectrum[spectrum_start:spectrum_end],\n torch.tensor(noise_up_time_mult), # if\n torch.tensor(noise_down_time_mult), # else\n )\n )\n\n noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult))\n _d = torch.sqrt(\n torch.max(\n _zeros,\n _d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end],\n )\n )\n\n _cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1)\n _cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window\n _cepstrum_Buf[spectrum_end:dft_len_ws >> 1].zero_()\n\n # lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf);\n _cepstrum_Buf = torchaudio._internal.fft.rfft(_cepstrum_Buf)\n\n result: float = float(\n torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2))\n )\n result = (\n math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf\n )\n return max(0, 21 + result)\n\n\ndef vad(\n waveform: Tensor,\n sample_rate: int,\n trigger_level: float = 7.0,\n trigger_time: float = 0.25,\n search_time: float = 1.0,\n allowed_gap: float = 0.25,\n pre_trigger_time: float = 0.0,\n # Fine-tuning parameters\n boot_time: float = 0.35,\n noise_up_time: float = 0.1,\n noise_down_time: float = 0.01,\n noise_reduction_amount: float = 1.35,\n measure_freq: float 
= 20.0,\n measure_duration: Optional[float] = None,\n measure_smooth_time: float = 0.4,\n hp_filter_freq: float = 50.0,\n lp_filter_freq: float = 6000.0,\n hp_lifter_freq: float = 150.0,\n lp_lifter_freq: float = 2000.0,\n) -> Tensor:\n r\"\"\"Voice Activity Detector. Similar to SoX implementation.\n Attempts to trim silence and quiet background sounds from the ends of recordings of speech.\n The algorithm currently uses a simple cepstral power measurement to detect voice,\n so may be fooled by other things, especially music.\n\n The effect can trim only from the front of the audio,\n so in order to trim from the back, the reverse effect must also be used.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension `(..., time)`\n sample_rate (int): Sample rate of audio signal.\n trigger_level (float, optional): The measurement level used to trigger activity detection.\n This may need to be changed depending on the noise level, signal level,\n and other characteristics of the input audio. (Default: 7.0)\n trigger_time (float, optional): The time constant (in seconds)\n used to help ignore short bursts of sound. (Default: 0.25)\n search_time (float, optional): The amount of audio (in seconds)\n to search for quieter/shorter bursts of audio to include prior\n to the detected trigger point. (Default: 1.0)\n allowed_gap (float, optional): The allowed gap (in seconds) between\n quieter/shorter bursts of audio to include prior\n to the detected trigger point. (Default: 0.25)\n pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve\n before the trigger point and any found quieter/shorter bursts. (Default: 0.0)\n boot_time (float, optional): The algorithm (internally) uses adaptive noise\n estimation/reduction in order to detect the start of the wanted audio.\n This option sets the time for the initial noise estimate. (Default: 0.35)\n noise_up_time (float, optional): Time constant used by the adaptive noise estimator\n for when the noise level is increasing. (Default: 0.1)\n noise_down_time (float, optional): Time constant used by the adaptive noise estimator\n for when the noise level is decreasing. (Default: 0.01)\n noise_reduction_amount (float, optional): Amount of noise reduction to use in\n the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)\n measure_freq (float, optional): Frequency of the algorithm’s\n processing/measurements. (Default: 20.0)\n measure_duration (float, optional): Measurement duration.\n (Default: Twice the measurement period; i.e. with overlap.)\n measure_smooth_time (float, optional): Time constant used to smooth\n spectral measurements. (Default: 0.4)\n hp_filter_freq (float, optional): \"Brick-wall\" frequency of high-pass filter applied\n at the input to the detector algorithm. (Default: 50.0)\n lp_filter_freq (float, optional): \"Brick-wall\" frequency of low-pass filter applied\n at the input to the detector algorithm. (Default: 6000.0)\n hp_lifter_freq (float, optional): \"Brick-wall\" frequency of high-pass lifter used\n in the detector algorithm. (Default: 150.0)\n lp_lifter_freq (float, optional): \"Brick-wall\" frequency of low-pass lifter used\n in the detector algorithm. 
(Default: 2000.0)\n\n Returns:\n Tensor: Tensor of audio of dimension (..., time).\n\n References:\n http://sox.sourceforge.net/sox.html\n \"\"\"\n\n measure_duration: float = (\n 2.0 / measure_freq if measure_duration is None else measure_duration\n )\n\n measure_len_ws = int(sample_rate * measure_duration + 0.5)\n measure_len_ns = measure_len_ws\n # for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1);\n dft_len_ws = 16\n while dft_len_ws < measure_len_ws:\n dft_len_ws *= 2\n\n measure_period_ns = int(sample_rate / measure_freq + 0.5)\n measures_len = math.ceil(search_time * measure_freq)\n search_pre_trigger_len_ns = measures_len * measure_period_ns\n gap_len = int(allowed_gap * measure_freq + 0.5)\n\n fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5)\n samplesLen_ns = (\n fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns\n )\n\n spectrum_window = torch.zeros(measure_len_ws)\n for i in range(measure_len_ws):\n # sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32)\n spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws))\n # lsx_apply_hann(spectrum_window, (int)measure_len_ws);\n spectrum_window *= torch.hann_window(measure_len_ws, dtype=torch.float)\n\n spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5)\n spectrum_start: int = max(spectrum_start, 1)\n spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5)\n spectrum_end: int = min(spectrum_end, dft_len_ws // 2)\n\n cepstrum_window = torch.zeros(spectrum_end - spectrum_start)\n for i in range(spectrum_end - spectrum_start):\n cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end) - spectrum_start)\n # lsx_apply_hann(cepstrum_window,(int)(spectrum_end - spectrum_start));\n cepstrum_window *= torch.hann_window(\n spectrum_end - spectrum_start, dtype=torch.float\n )\n\n cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq)\n cepstrum_end = math.floor(sample_rate * 0.5 / hp_lifter_freq)\n cepstrum_end = min(cepstrum_end, dft_len_ws // 4)\n\n assert cepstrum_end > cepstrum_start\n\n noise_up_time_mult = math.exp(-1.0 / (noise_up_time * measure_freq))\n noise_down_time_mult = math.exp(-1.0 / (noise_down_time * measure_freq))\n measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq))\n trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq))\n\n boot_count_max = int(boot_time * measure_freq - 0.5)\n measure_timer_ns = measure_len_ns\n boot_count = measures_index = flushedLen_ns = samplesIndex_ns = 0\n\n # pack batch\n shape = waveform.size()\n waveform = waveform.view(-1, shape[-1])\n\n n_channels, ilen = waveform.size()\n\n mean_meas = torch.zeros(n_channels)\n samples = torch.zeros(n_channels, samplesLen_ns)\n spectrum = torch.zeros(n_channels, dft_len_ws)\n noise_spectrum = torch.zeros(n_channels, dft_len_ws)\n measures = torch.zeros(n_channels, measures_len)\n\n has_triggered: bool = False\n num_measures_to_flush: int = 0\n pos: int = 0\n\n while pos < ilen and not has_triggered:\n measure_timer_ns -= 1\n for i in range(n_channels):\n samples[i, samplesIndex_ns] = waveform[i, pos]\n # if (!p->measure_timer_ns) {\n if measure_timer_ns == 0:\n index_ns: int = (\n samplesIndex_ns + samplesLen_ns - measure_len_ns\n ) % samplesLen_ns\n meas: float = _measure(\n measure_len_ws=measure_len_ws,\n samples=samples[i],\n spectrum=spectrum[i],\n noise_spectrum=noise_spectrum[i],\n spectrum_window=spectrum_window,\n spectrum_start=spectrum_start,\n spectrum_end=spectrum_end,\n 
cepstrum_window=cepstrum_window,\n cepstrum_start=cepstrum_start,\n cepstrum_end=cepstrum_end,\n noise_reduction_amount=noise_reduction_amount,\n measure_smooth_time_mult=measure_smooth_time_mult,\n noise_up_time_mult=noise_up_time_mult,\n noise_down_time_mult=noise_down_time_mult,\n index_ns=index_ns,\n boot_count=boot_count,\n )\n measures[i, measures_index] = meas\n mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * (\n 1.0 - trigger_meas_time_mult\n )\n\n has_triggered = has_triggered or (mean_meas[i] >= trigger_level)\n if has_triggered:\n n: int = measures_len\n k: int = measures_index\n jTrigger: int = n\n jZero: int = n\n j: int = 0\n\n for j in range(n):\n if (measures[i, k] >= trigger_level) and (\n j <= jTrigger + gap_len\n ):\n jZero = jTrigger = j\n elif (measures[i, k] == 0) and (jTrigger >= jZero):\n jZero = j\n k = (k + n - 1) % n\n j = min(j, jZero)\n # num_measures_to_flush = range_limit(j, num_measures_to_flush, n);\n num_measures_to_flush = min(max(num_measures_to_flush, j), n)\n # end if has_triggered\n # end if (measure_timer_ns == 0):\n # end for\n samplesIndex_ns += 1\n pos += 1\n # end while\n if samplesIndex_ns == samplesLen_ns:\n samplesIndex_ns = 0\n if measure_timer_ns == 0:\n measure_timer_ns = measure_period_ns\n measures_index += 1\n measures_index = measures_index % measures_len\n if boot_count >= 0:\n boot_count = -1 if boot_count == boot_count_max else boot_count + 1\n\n if has_triggered:\n flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns\n samplesIndex_ns = (samplesIndex_ns + flushedLen_ns) % samplesLen_ns\n\n res = waveform[:, pos - samplesLen_ns + flushedLen_ns:]\n # unpack batch\n return res.view(shape[:-1] + res.shape[-1:])\n"
] | [
[
"torch.zeros",
"torch.round",
"torch.cat",
"torch.device",
"torch.bartlett_window",
"torch.floor",
"torch.sin",
"torch.arange",
"torch.max",
"torch.hann_window",
"torch.stack",
"torch.clamp",
"torch.randint",
"torch.tensor",
"torch.frac",
"torch.zeros_like",
"torch.nn.functional.pad"
]
] |
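The torchaudio row above derives its tone-control filters from the audio-EQ-cookbook shelf formulas before handing the coefficients to a shared `biquad` helper. The sketch below reproduces the treble-shelf coefficient math from that row and applies it with `scipy.signal.lfilter` as a stand-in filter; scipy and the synthetic noise input are assumptions for the demo, not part of the row.

```python
# Sketch: treble-shelf biquad coefficients, exactly as computed in the row
# above, applied via scipy.signal.lfilter (an assumed stand-in; the row
# itself dispatches to its own biquad helper instead).
import math
import numpy as np
from scipy.signal import lfilter

def treble_shelf_coeffs(sample_rate, gain_db, central_freq=3000.0, Q=0.707):
    w0 = 2 * math.pi * central_freq / sample_rate
    alpha = math.sin(w0) / 2 / Q
    A = math.exp(gain_db / 40 * math.log(10))  # i.e. 10 ** (gain_db / 40)
    temp1 = 2 * math.sqrt(A) * alpha
    temp2 = (A - 1) * math.cos(w0)
    temp3 = (A + 1) * math.cos(w0)
    b = [A * ((A + 1) + temp2 + temp1),
         -2 * A * ((A - 1) + temp3),
         A * ((A + 1) + temp2 - temp1)]
    a = [(A + 1) - temp2 + temp1,
         2 * ((A - 1) - temp3),
         (A + 1) - temp2 - temp1]
    return b, a

b, a = treble_shelf_coeffs(44100, gain_db=6.0)
x = np.random.default_rng(0).uniform(-0.5, 0.5, 44100)   # 1 s of test noise
y = lfilter(np.array(b) / a[0], np.array(a) / a[0], x)   # normalised biquad
```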
Line290/tsn-pytorch | [
"91ee075b98df9fa063adc57b296cb5fe15f43f8a"
] | [
"models.py"
] | [
"from torch import nn\n\nfrom ops.basic_ops import ConsensusModule, Identity\nfrom transforms import *\nfrom torch.nn.init import normal, constant\n\nclass TSN(nn.Module):\n def __init__(self, num_class, num_segments, modality,\n base_model='resnet101', new_length=None,\n consensus_type='avg', before_softmax=True,\n dropout=0.8,\n crop_num=1, partial_bn=True):\n super(TSN, self).__init__()\n self.modality = modality\n self.num_segments = num_segments\n self.reshape = True\n self.before_softmax = before_softmax\n self.dropout = dropout\n self.crop_num = crop_num\n self.consensus_type = consensus_type\n if not before_softmax and consensus_type != 'avg':\n raise ValueError(\"Only avg consensus can be used after Softmax\")\n\n if new_length is None:\n self.new_length = 1 if modality == \"RGB\" else 5\n else:\n self.new_length = new_length\n\n print((\"\"\"\nInitializing TSN with base model: {}.\nTSN Configurations:\n input_modality: {}\n num_segments: {}\n new_length: {}\n consensus_module: {}\n dropout_ratio: {}\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout)))\n\n self._prepare_base_model(base_model)\n\n feature_dim = self._prepare_tsn(num_class)\n\n if self.modality == 'Flow':\n print(\"Converting the ImageNet model to a flow init model\")\n self.base_model = self._construct_flow_model(self.base_model)\n print(\"Done. Flow model ready...\")\n elif self.modality == 'RGBDiff':\n print(\"Converting the ImageNet model to RGB+Diff init model\")\n self.base_model = self._construct_diff_model(self.base_model)\n print(\"Done. RGBDiff model ready.\")\n\n self.consensus = ConsensusModule(consensus_type)\n\n if not self.before_softmax:\n self.softmax = nn.Softmax()\n\n self._enable_pbn = partial_bn\n if partial_bn:\n self.partialBN(True)\n\n def _prepare_tsn(self, num_class):\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\n if self.dropout == 0:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\n self.new_fc = None\n else:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n self.new_fc = nn.Linear(feature_dim, num_class)\n\n std = 0.001\n if self.new_fc is None:\n normal(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)\n constant(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)\n else:\n normal(self.new_fc.weight, 0, std)\n constant(self.new_fc.bias, 0)\n return feature_dim\n\n def _prepare_base_model(self, base_model):\n\n if 'resnet' in base_model or 'vgg' in base_model:\n self.base_model = getattr(torchvision.models, base_model)(True)\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = [0.485, 0.456, 0.406] + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n elif base_model == 'BNInception':\n import tf_model_zoo\n self.base_model = getattr(tf_model_zoo, base_model)()\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [104, 117, 128]\n self.input_std = [1]\n\n if self.modality == 'Flow':\n self.input_mean = [128]\n elif self.modality == 'RGBDiff':\n self.input_mean = self.input_mean * (1 + self.new_length)\n\n elif 'inception' in 
base_model:\n import tf_model_zoo\n self.base_model = getattr(tf_model_zoo, base_model)()\n self.base_model.last_layer_name = 'classif'\n self.input_size = 299\n self.input_mean = [0.5]\n self.input_std = [0.5]\n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n :return:\n \"\"\"\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n def partialBN(self, enable):\n self._enable_pbn = enable\n\n def get_optim_policies(self):\n first_conv_weight = []\n first_conv_bias = []\n normal_weight = []\n normal_bias = []\n bn = []\n\n conv_cnt = 0\n bn_cnt = 0\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d):\n ps = list(m.parameters())\n conv_cnt += 1\n if conv_cnt == 1:\n first_conv_weight.append(ps[0])\n if len(ps) == 2:\n first_conv_bias.append(ps[1])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n \n elif isinstance(m, torch.nn.BatchNorm1d):\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. 
Need to give it a learning policy\".format(type(m)))\n\n return [\n {'params': first_conv_weight, 'lr_mult': 5 if self.modality == 'Flow' else 1, 'decay_mult': 1,\n 'name': \"first_conv_weight\"},\n {'params': first_conv_bias, 'lr_mult': 10 if self.modality == 'Flow' else 2, 'decay_mult': 0,\n 'name': \"first_conv_bias\"},\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"normal_weight\"},\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"normal_bias\"},\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n ]\n\n def forward(self, input):\n sample_len = (3 if self.modality == \"RGB\" else 2) * self.new_length\n\n if self.modality == 'RGBDiff':\n sample_len = 3 * self.new_length\n input = self._get_diff(input)\n\n base_out = self.base_model(input.view((-1, sample_len) + input.size()[-2:]))\n\n if self.dropout > 0:\n base_out = self.new_fc(base_out)\n\n if not self.before_softmax:\n base_out = self.softmax(base_out)\n if self.reshape:\n base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:])\n\n output = self.consensus(base_out)\n return output.squeeze(1)\n\n def _get_diff(self, input, keep_rgb=False):\n input_c = 3 if self.modality in [\"RGB\", \"RGBDiff\"] else 2\n input_view = input.view((-1, self.num_segments, self.new_length + 1, input_c,) + input.size()[2:])\n if keep_rgb:\n new_data = input_view.clone()\n else:\n new_data = input_view[:, :, 1:, :, :, :].clone()\n\n for x in reversed(list(range(1, self.new_length + 1))):\n if keep_rgb:\n new_data[:, :, x, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n else:\n new_data[:, :, x - 1, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n\n return new_data\n\n\n def _construct_flow_model(self, base_model):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.Module.modules() returns all submodules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n new_kernel_size = kernel_size[:1] + (2 * self.new_length, ) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n\n new_conv = nn.Conv2d(2 * self.new_length, conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if necessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convolution layer\n setattr(container, layer_name, new_conv)\n return base_model\n\n def _construct_diff_model(self, base_model, keep_rgb=False):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.Module.modules() returns all submodules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume 
the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n if not keep_rgb:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n else:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = torch.cat((params[0].data, params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()),\n 1)\n new_kernel_size = kernel_size[:1] + (3 + 3 * self.new_length,) + kernel_size[2:]\n\n new_conv = nn.Conv2d(new_kernel_size[1], conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if necessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convolution layer\n setattr(container, layer_name, new_conv)\n return base_model\n\n @property\n def crop_size(self):\n return self.input_size\n\n @property\n def scale_size(self):\n return self.input_size * 256 // 224\n\n def get_augmentation(self):\n if self.modality == 'RGB':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66]),\n GroupRandomHorizontalFlip(is_flow=False)])\n elif self.modality == 'Flow':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n GroupRandomHorizontalFlip(is_flow=True)])\n elif self.modality == 'RGBDiff':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n GroupRandomHorizontalFlip(is_flow=False)])\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.constant",
"torch.nn.Softmax",
"torch.nn.init.normal"
]
] |
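The heart of the TSN row above is the consensus step in `forward()`: per-frame logits come back from the base model, are reshaped to `(batch, num_segments, num_class)`, and are reduced across segments. A minimal sketch of the 'avg' consensus path follows; the shapes are made up for illustration.

```python
# Minimal sketch of TSN's segmental consensus: per-segment logits are
# reshaped to (batch, num_segments, num_class) and averaged, mirroring
# the view(...) + ConsensusModule('avg') path in forward() above.
import torch

batch, num_segments, num_class = 4, 3, 101            # illustrative sizes
per_frame_logits = torch.randn(batch * num_segments, num_class)  # base_model output
segmental = per_frame_logits.view(batch, num_segments, num_class)
video_logits = segmental.mean(dim=1)                   # 'avg' consensus
print(video_logits.shape)                              # torch.Size([4, 101])
```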
t871005y/structural-analogy-cut | [
"3159dbc96124e73382f9497457902610ade46237"
] | [
"imresize.py"
] | [
"import numpy as np\nfrom scipy.ndimage import filters, measurements, interpolation\nfrom skimage import color\nfrom math import pi\n#from SinGAN.functions import torch2uint8, np2torch\nimport torch\n\n\ndef denorm(x):\n out = (x + 1) / 2\n return out.clamp(0, 1)\n\ndef norm(x):\n out = (x - 0.5) * 2\n return out.clamp(-1, 1)\n\ndef move_to_gpu(t):\n if (torch.cuda.is_available()):\n t = t.to(torch.device('cuda'))\n return t\n\n\ndef np2torch(x,opt):\n x = x[:,:,:,None]\n x = x.transpose((3, 2, 0, 1))/255\n x = torch.from_numpy(x)\n if not (opt.not_cuda):\n x = move_to_gpu(x)\n x = x.type(torch.cuda.FloatTensor) if not(opt.not_cuda) else x.type(torch.FloatTensor)\n #x = x.type(torch.cuda.FloatTensor)\n x = norm(x)\n return x\n\ndef np2torch_old(x,opt):\n if opt.nc_im == 3:\n x = x[:,:,:,None]\n x = x.transpose((3, 2, 0, 1))/255\n else:\n x = color.rgb2gray(x)\n x = x[:,:,None,None]\n x = x.transpose(3, 2, 0, 1)\n x = torch.from_numpy(x)\n if not (opt.not_cuda):\n x = move_to_gpu(x)\n x = x.type(torch.cuda.FloatTensor) if not(opt.not_cuda) else x.type(torch.FloatTensor)\n #x = x.type(torch.cuda.FloatTensor)\n x = norm(x)\n return x\n\ndef torch2uint8(x):\n x = x[0,:,:,:]\n x = x.permute((1,2,0))\n x = 255*denorm(x)\n x = x.cpu().numpy()\n x = x.astype(np.uint8)\n #print(\"torch2uint8\")\n #print(x.shape)\n return x\n\n\ndef imresize2(im,scale,opt):\n #s = im.shape\n #print(\"!!!!!\")\n #print(im.size())\n\n size = im.size()\n imgs = []\n for i in range(size[0]):\n imgs.append(imresize(im[i].unsqueeze(0), scale, opt).squeeze(0))\n\n res = torch.stack(imgs, dim=0)\n\n #print(res.size())\n #im = im[:, :, 0:int(scale * s[2]), 0:int(scale * s[3])]\n return res\n\ndef imresize(im,scale,opt):\n #s = im.shape\n #print(\"------\")\n #print(im.size())\n im = torch2uint8(im)\n im = imresize_in(im, scale_factor=scale)\n #print(im.shape)\n im = np2torch(im,opt)\n #print(im.size())\n #im = im[:, :, 0:int(scale * s[2]), 0:int(scale * s[3])]\n return im\n\ndef imresize_arr(arr,scale,opt):\n res = []\n for im in arr:\n im = torch2uint8(im)\n im = imresize_in(im, scale_factor=scale)\n im = np2torch(im,opt)\n res.append(im)\n return res\n\ndef imresize_to_shape(im,output_shape,opt):\n #s = im.shape\n im = torch2uint8(im)\n im = imresize_in(im, output_shape=output_shape)\n im = np2torch(im,opt)\n #im = im[:, :, 0:int(scale * s[2]), 0:int(scale * s[3])]\n return im\n\n\ndef imresize_in(im, scale_factor=None, output_shape=None, kernel=None, antialiasing=True, kernel_shift_flag=False):\n # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa\n scale_factor, output_shape = fix_scale_and_size(im.shape, output_shape, scale_factor)\n\n # For a given numeric kernel case, just do convolution and sub-sampling (downscaling only)\n if type(kernel) == np.ndarray and scale_factor[0] <= 1:\n return numeric_kernel(im, kernel, scale_factor, output_shape, kernel_shift_flag)\n\n # Choose interpolation method, each method has the matching kernel size\n method, kernel_width = {\n \"cubic\": (cubic, 4.0),\n \"lanczos2\": (lanczos2, 4.0),\n \"lanczos3\": (lanczos3, 6.0),\n \"box\": (box, 1.0),\n \"linear\": (linear, 2.0),\n None: (cubic, 4.0) # set default interpolation method as cubic\n }.get(kernel)\n\n # Antialiasing is only used when downscaling\n antialiasing *= (scale_factor[0] < 1)\n\n # Sort indices of dimensions according to scale of each dimension. 
since we are going dim by dim this is efficient\n sorted_dims = np.argsort(np.array(scale_factor)).tolist()\n\n # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction\n out_im = np.copy(im)\n for dim in sorted_dims:\n # No point doing calculations for scale-factor 1. nothing will happen anyway\n if scale_factor[dim] == 1.0:\n continue\n\n # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the\n # weights that multiply the values there to get its result.\n weights, field_of_view = contributions(im.shape[dim], output_shape[dim], scale_factor[dim],\n method, kernel_width, antialiasing)\n\n # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim\n out_im = resize_along_dim(out_im, dim, weights, field_of_view)\n\n return out_im\n\n\ndef fix_scale_and_size(input_shape, output_shape, scale_factor):\n # First fixing the scale-factor (if given) to the standardized form the function expects (a list of scale factors in the\n # same size as the number of input dimensions)\n if scale_factor is not None:\n # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.\n if np.isscalar(scale_factor):\n scale_factor = [scale_factor, scale_factor]\n\n # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales\n scale_factor = list(scale_factor)\n scale_factor.extend([1] * (len(input_shape) - len(scale_factor)))\n\n # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size\n # to all the unspecified dimensions\n if output_shape is not None:\n output_shape = list(np.uint(np.array(output_shape))) + list(input_shape[len(output_shape):])\n\n # Dealing with the case of a non-given scale-factor, calculating according to output-shape. note that this is\n # sub-optimal, because there can be different scales to the same output-shape.\n if scale_factor is None:\n scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)\n\n # Dealing with missing output-shape. calculating according to scale-factor\n if output_shape is None:\n output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))\n\n return scale_factor, output_shape\n\n\ndef contributions(in_length, out_length, scale, kernel, kernel_width, antialiasing):\n # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied\n # such that each position from the field_of_view will be multiplied with a matching filter from the\n # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers\n # around it. This is only done for one dimension of the image.\n\n # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of\n # 1/sf. this means filtering is more 'low-pass filter'.\n fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel\n kernel_width *= 1.0 / scale if antialiasing else 1.0\n\n # These are the coordinates of the output image\n out_coordinates = np.arange(1, out_length+1)\n\n # These are the matching positions of the output-coordinates on the input image coordinates.\n # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:\n # [1,2,3,4] -> [1,2]. 
Remember each pixel number is the middle of the pixel.\n # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to\n # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big\n # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).\n # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is\n # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:\n # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)\n match_coordinates = 1.0 * out_coordinates / scale + 0.5 * (1 - 1.0 / scale)\n\n # This is the left boundary to start multiplying the filter from, it depends on the size of the filter\n left_boundary = np.floor(match_coordinates - kernel_width / 2)\n\n # Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers\n # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)\n expanded_kernel_width = np.ceil(kernel_width) + 2\n\n # Determine a set of field_of_view for each output position, these are the pixels in the input image\n # that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the\n # vertical dim is the pixels it 'sees' (kernel_size + 2)\n field_of_view = np.squeeze(np.uint(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1))\n\n # Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the\n # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in\n # 'field_of_view')\n weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)\n\n # Normalize weights to sum up to 1. be careful not to divide by 0\n sum_weights = np.sum(weights, axis=1)\n sum_weights[sum_weights == 0] = 1.0\n weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)\n\n # We use this mirror structure as a trick for reflection padding at the boundaries\n mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))\n field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]\n\n # Get rid of weights and pixel positions that are of zero weight\n non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))\n weights = np.squeeze(weights[:, non_zero_out_pixels])\n field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])\n\n # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size\n return weights, field_of_view\n\n\ndef resize_along_dim(im, dim, weights, field_of_view):\n # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize\n tmp_im = np.swapaxes(im, dim, 0)\n\n # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for\n # tmp_im[field_of_view.T], (bsxfun style)\n weights = np.reshape(weights.T, list(weights.T.shape) + (np.ndim(im) - 1) * [1])\n\n # This is a bit of a complicated multiplication: tmp_im[field_of_view.T] is a tensor of order image_dims+1.\n # for each pixel in the output-image it matches the positions that influence it from the input image (along 1 dim\n # only, this is why it only adds 1 dim to the shape). 
We then multiply, for each pixel, its set of positions with\n # the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:\n # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the\n # same number)\n tmp_out_im = np.sum(tmp_im[field_of_view.T] * weights, axis=0)\n\n # Finally we swap back the axes to the original order\n return np.swapaxes(tmp_out_im, dim, 0)\n\n\ndef numeric_kernel(im, kernel, scale_factor, output_shape, kernel_shift_flag):\n # See kernel_shift function to understand what this is\n if kernel_shift_flag:\n kernel = kernel_shift(kernel, scale_factor)\n\n # First run a correlation (convolution with flipped kernel)\n out_im = np.zeros_like(im)\n for channel in range(np.ndim(im)):\n out_im[:, :, channel] = filters.correlate(im[:, :, channel], kernel)\n\n # Then subsample and return\n return out_im[np.round(np.linspace(0, im.shape[0] - 1 / scale_factor[0], output_shape[0])).astype(int)[:, None],\n np.round(np.linspace(0, im.shape[1] - 1 / scale_factor[1], output_shape[1])).astype(int), :]\n\n\ndef kernel_shift(kernel, sf):\n # There are two reasons for shifting the kernel:\n # 1. Center of mass is not in the center of the kernel which creates ambiguity. There is no possible way to know\n # the degradation process included shifting so we always assume center of mass is center of the kernel.\n # 2. We further shift kernel center so that top left result pixel corresponds to the middle of the sfXsf first\n # pixels. Default is for odd size to be in the middle of the first pixel and for even sized kernel to be at the\n # top left corner of the first pixel. that is why a different shift size is needed between odd and even size.\n # Given that these two conditions are fulfilled, we are happy and aligned, the way to test it is as follows:\n # The input image, when interpolated (regular bicubic) is exactly aligned with ground truth.\n\n # First calculate the current center of mass for the kernel\n current_center_of_mass = measurements.center_of_mass(kernel)\n\n # The second (\"+ 0.5 * ....\") is for applying condition 2 from the comments above\n wanted_center_of_mass = np.array(kernel.shape) / 2 + 0.5 * (sf - (kernel.shape[0] % 2))\n\n # Define the shift vector for the kernel shifting (x,y)\n shift_vec = wanted_center_of_mass - current_center_of_mass\n\n # Before applying the shift, we first pad the kernel so that nothing is lost due to the shift\n # (biggest shift among dims + 1 for safety)\n kernel = np.pad(kernel, np.int(np.ceil(np.max(shift_vec))) + 1, 'constant')\n\n # Finally shift the kernel and return\n return interpolation.shift(kernel, shift_vec)\n\n\n# These next functions are all interpolation methods. x is the distance from the left pixel center\n\n\ndef cubic(x):\n absx = np.abs(x)\n absx2 = absx ** 2\n absx3 = absx ** 3\n return ((1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +\n (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) & (absx <= 2)))\n\n\ndef lanczos2(x):\n return (((np.sin(pi*x) * np.sin(pi*x/2) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps))\n * (abs(x) < 2))\n\n\ndef box(x):\n return ((-0.5 <= x) & (x < 0.5)) * 1.0\n\n\ndef lanczos3(x):\n return (((np.sin(pi*x) * np.sin(pi*x/3) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps))\n * (abs(x) < 3))\n\n\ndef linear(x):\n return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))\n"
] | [
[
"torch.stack",
"numpy.copy",
"numpy.finfo",
"torch.cuda.is_available",
"scipy.ndimage.measurements.center_of_mass",
"numpy.max",
"numpy.zeros_like",
"numpy.sin",
"numpy.swapaxes",
"numpy.arange",
"numpy.ndim",
"scipy.ndimage.filters.correlate",
"numpy.expand_dims",
"numpy.mod",
"torch.device",
"numpy.array",
"numpy.isscalar",
"numpy.squeeze",
"numpy.floor",
"scipy.ndimage.interpolation.shift",
"numpy.ceil",
"numpy.sum",
"torch.from_numpy",
"numpy.any",
"numpy.abs",
"numpy.linspace"
]
] |
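The resize code above boils down to one idea per dimension: map each output pixel centre back to the input with `p_old = p_new/sf + 0.5*(1 - 1/sf)`, then take a cubic-weighted (and, when downscaling, antialiased) average of its neighbours. Below is a self-contained 1-D sketch of that core; the helper name, the edge handling (a crude clamp instead of the row's mirror padding), and the exact tap offsets are simplifications, not the row's API.

```python
# Sketch of the 1-D resampling core above: back-map output centres, then
# weight neighbours with the (antialiased) cubic kernel, mirroring what
# contributions() + resize_along_dim() compute per dimension.
import numpy as np

def cubic(x):
    ax = np.abs(x); ax2 = ax ** 2; ax3 = ax ** 3
    return ((1.5 * ax3 - 2.5 * ax2 + 1) * (ax <= 1) +
            (-0.5 * ax3 + 2.5 * ax2 - 4 * ax + 2) * ((1 < ax) & (ax <= 2)))

def resize1d(sig, scale):
    out_len = int(np.ceil(len(sig) * scale))
    out = np.empty(out_len)
    kernel_width = 4.0 / scale if scale < 1 else 4.0   # stretched when downscaling
    for i, p_new in enumerate(np.arange(1, out_len + 1)):
        centre = p_new / scale + 0.5 * (1 - 1 / scale)  # match_coordinates
        left = int(np.floor(centre - kernel_width / 2))
        idx = np.arange(left, left + int(np.ceil(kernel_width)) + 2)
        w = cubic((centre - idx - 1) * scale) * scale if scale < 1 else cubic(centre - idx - 1)
        idx = np.clip(idx, 0, len(sig) - 1)             # crude stand-in for mirror padding
        out[i] = np.dot(w / w.sum(), sig[idx])
    return out

# Roughly [0.5, 2.5, 4.5, ~6.5]; the last value deviates because of the clamp.
print(resize1d(np.arange(8, dtype=float), 0.5))
```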
PangHua/InfiniteScience | [
"67378d2625c4d70d5b59d5e7a3f904284bfe65da"
] | [
"examples/lettuce_paddle/lettuce/reporters.py"
] | [
"# code was heavily based on https://github.com/lettucecfd/lettuce\r\n# Users should be careful about adopting these functions in any commercial matters.\r\n# https://github.com/lettucecfd/lettuce#license\r\n\r\n\"\"\"\r\nInput/output routines.\r\nTODO: Logging\r\n\"\"\"\r\n\r\nimport sys\r\nimport warnings\r\nimport os\r\nimport numpy as np\r\nimport paddle\r\nimport pyevtk.hl as vtk\r\nfrom lettuce.utils import pdprod,pdnorm\r\n\r\n__all__ = [\r\n \"write_image\", \"write_vtk\", \"VTKReporter\", \"ObservableReporter\", \"ErrorReporter\"\r\n]\r\n\r\n\r\ndef write_image(filename, array2d):\r\n from matplotlib import pyplot as plt\r\n fig, ax = plt.subplots()\r\n plt.tight_layout()\r\n ax.imshow(array2d)\r\n ax.set_xlabel('')\r\n ax.set_ylabel('')\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n plt.savefig(filename)\r\n\r\n\r\ndef write_vtk(point_dict, id=0, filename_base=\"./data/output\"):\r\n vtk.gridToVTK(f\"{filename_base}_{id:08d}\",\r\n np.arange(0, point_dict[\"p\"].shape[0]),\r\n np.arange(0, point_dict[\"p\"].shape[1]),\r\n np.arange(0, point_dict[\"p\"].shape[2]),\r\n pointData=point_dict)\r\n\r\n\r\nclass VTKReporter:\r\n \"\"\"General VTK Reporter for velocity and pressure\"\"\"\r\n\r\n def __init__(self, lattice, flow, interval=50, filename_base=\"./data/output\"):\r\n self.lattice = lattice\r\n self.flow = flow\r\n self.interval = interval\r\n self.filename_base = filename_base\r\n directory = os.path.dirname(filename_base)\r\n if not os.path.isdir(directory):\r\n os.mkdir(directory)\r\n self.point_dict = dict()\r\n\r\n def __call__(self, i, t, f):\r\n if i % self.interval == 0:\r\n u = self.flow.units.convert_velocity_to_pu(self.lattice.u(f))\r\n p = self.flow.units.convert_density_lu_to_pressure_pu(self.lattice.rho(f))\r\n if self.lattice.D == 2:\r\n self.point_dict[\"p\"] = self.lattice.convert_to_numpy(p[0, ..., None])\r\n for d in range(self.lattice.D):\r\n self.point_dict[f\"u{'xyz'[d]}\"] = self.lattice.convert_to_numpy(u[d, ..., None])\r\n else:\r\n self.point_dict[\"p\"] = self.lattice.convert_to_numpy(p[0, ...])\r\n for d in range(self.lattice.D):\r\n self.point_dict[f\"u{'xyz'[d]}\"] = self.lattice.convert_to_numpy(u[d, ...])\r\n write_vtk(self.point_dict, i, self.filename_base)\r\n\r\n def output_mask(self, no_collision_mask):\r\n \"\"\"Outputs the no_collision_mask of the simulation object as VTK-file with range [0,1]\r\n Usage: vtk_reporter.output_mask(simulation.no_collision_mask)\"\"\"\r\n point_dict = dict()\r\n if self.lattice.D == 2:\r\n point_dict[\"mask\"] = self.lattice.convert_to_numpy(no_collision_mask)[..., None].astype(int)\r\n else:\r\n point_dict[\"mask\"] = self.lattice.convert_to_numpy(no_collision_mask).astype(int)\r\n vtk.gridToVTK(self.filename_base + \"_mask\",\r\n np.arange(0, point_dict[\"mask\"].shape[0]),\r\n np.arange(0, point_dict[\"mask\"].shape[1]),\r\n np.arange(0, point_dict[\"mask\"].shape[2]),\r\n pointData=point_dict)\r\n\r\n\r\nclass ErrorReporter:\r\n \"\"\"Reports numerical errors with respect to analytic solution.\"\"\"\r\n\r\n def __init__(self, lattice, flow, interval=1, out=sys.stdout):\r\n assert hasattr(flow, \"analytic_solution\")\r\n self.lattice = lattice\r\n self.flow = flow\r\n self.interval = interval\r\n self.out = [] if out is None else out\r\n if not isinstance(self.out, list):\r\n print(\"#error_u error_p\", file=self.out)\r\n\r\n def __call__(self, i, t, f):\r\n if i % self.interval == 0:\r\n pref, uref = self.flow.analytic_solution(self.flow.grid, t=t)\r\n pref = 
self.lattice.convert_to_tensor(pref)\r\n uref = self.lattice.convert_to_tensor(uref)\r\n u = self.flow.units.convert_velocity_to_pu(self.lattice.u(f))\r\n p = self.flow.units.convert_density_lu_to_pressure_pu(self.lattice.rho(f))\r\n\r\n resolution = paddle.pow(pdprod(self.lattice.convert_to_tensor(p.shape)), 1 / self.lattice.D)\r\n\r\n err_u = pdnorm(u - uref) / resolution ** (self.lattice.D / 2)\r\n err_p = pdnorm(p - pref) / resolution ** (self.lattice.D / 2)\r\n\r\n if isinstance(self.out, list):\r\n self.out.append([err_u.item(), err_p.item()])\r\n else:\r\n print(err_u.item(), err_p.item(), file=self.out)\r\n\r\n\r\nclass ObservableReporter:\r\n \"\"\"A reporter that prints an observable every few iterations.\r\n\r\n Examples\r\n --------\r\n Create an Enstrophy reporter.\r\n\r\n >>> from lettuce import TaylorGreenVortex3D, Enstrophy, D3Q27, Lattice\r\n >>> lattice = Lattice(D3Q27, device=\"cpu\")\r\n >>> flow = TaylorGreenVortex3D(50, 300, 0.1, lattice)\r\n >>> enstrophy = Enstrophy(lattice, flow)\r\n >>> reporter = ObservableReporter(enstrophy, interval=10)\r\n >>> # simulation = ...\r\n >>> # simulation.reporters.append(reporter)\r\n \"\"\"\r\n\r\n def __init__(self, observable, interval=1, out=sys.stdout):\r\n self.observable = observable\r\n self.interval = interval\r\n self.out = [] if out is None else out\r\n self._parameter_name = observable.__class__.__name__\r\n print('steps ', 'time ', self._parameter_name)\r\n\r\n def __call__(self, i, t, f):\r\n if i % self.interval == 0:\r\n observed = self.observable.lattice.convert_to_numpy(self.observable(f))\r\n assert len(observed.shape) < 2\r\n if len(observed.shape) == 0:\r\n observed = [observed.item()]\r\n else:\r\n observed = observed.tolist()\r\n entry = [i, t] + observed\r\n if isinstance(self.out, list):\r\n self.out.append(entry)\r\n else:\r\n print(*entry, file=self.out)\r\n"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
]
] |
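All reporters in the row above share one protocol: a callable invoked as `reporter(i, t, f)` every `interval` steps, which appends rows to a list when `out` was passed as `None`. A toy stand-in follows; the `MeanReporter` name and the mean-of-f observable are made up for illustration, not part of the lettuce API.

```python
# Sketch of the reporter protocol above: anything callable as
# reporter(i, t, f) can be appended to simulation.reporters. This toy
# version logs [step, time, mean(f)] in list mode, like out=None above.
import numpy as np

class MeanReporter:                      # illustrative name, not lettuce's
    def __init__(self, interval=10):
        self.interval = interval
        self.out = []                    # list mode, as in ObservableReporter(out=None)

    def __call__(self, i, t, f):
        if i % self.interval == 0:
            self.out.append([i, t, float(np.mean(f))])

reporter = MeanReporter(interval=2)
for step in range(6):
    reporter(step, 0.1 * step, np.ones((3, 3)) * step)
print(reporter.out)   # [[0, 0.0, 0.0], [2, 0.2, 2.0], [4, 0.4, 4.0]]
```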
zengjia110/tensorflow | [
"6dd278831a62be829ce6f15039e5b6b368b3727c"
] | [
"tensorflow/python/client/timeline_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.python.client.Timeline.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.client import timeline\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass TimelineTest(test.TestCase):\n\n def _validateTrace(self, chrome_trace_format):\n # Check that the supplied string is valid JSON.\n trace = json.loads(chrome_trace_format)\n # It should have a top-level key containing events.\n self.assertTrue('traceEvents' in trace)\n # Every event in the list should have a 'ph' field.\n for event in trace['traceEvents']:\n self.assertTrue('ph' in event)\n\n def testSimpleTimeline(self):\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n with ops.device('/cpu:0'):\n with session.Session() as sess:\n sess.run(constant_op.constant(1.0),\n options=run_options,\n run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField('step_stats'))\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n self._validateTrace(ctf)\n\n def testTimelineCpu(self):\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n with self.test_session(use_gpu=False) as sess:\n const1 = constant_op.constant(1.0, name='const1')\n const2 = constant_op.constant(2.0, name='const2')\n result = math_ops.add(const1, const2) + const1 * const2\n sess.run(result, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField('step_stats'))\n step_stats = run_metadata.step_stats\n devices = [d.device for d in step_stats.dev_stats]\n self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format()\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_dataflow=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_memory=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(\n show_memory=False, show_dataflow=False)\n self._validateTrace(ctf)\n\n def testTimelineGpu(self):\n if not test.is_gpu_available(cuda_only=True):\n return\n\n run_options = config_pb2.RunOptions(\n 
trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n with self.test_session(force_gpu=True) as sess:\n const1 = constant_op.constant(1.0, name='const1')\n const2 = constant_op.constant(2.0, name='const2')\n result = math_ops.add(const1, const2) + const1 * const2\n sess.run(result, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField('step_stats'))\n step_stats = run_metadata.step_stats\n devices = [d.device for d in step_stats.dev_stats]\n self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)\n self.assertTrue('/device:GPU:0/stream:all' in devices)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format()\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_dataflow=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_memory=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(\n show_memory=False, show_dataflow=False)\n self._validateTrace(ctf)\n\n def testTimelineWithRPCs(self):\n \"\"\"Tests that Timeline can handle RPC tracing.\"\"\"\n metadata = config_pb2.RunMetadata()\n step_stats = metadata.step_stats\n dev_stats = step_stats.dev_stats.add()\n dev_stats.device = '/job:worker/replica:0/task:0/cpu:0'\n node_stats = dev_stats.node_stats.add()\n node_stats.node_name = 'RecvTensor'\n node_stats.all_start_micros = 12345\n node_stats.op_end_rel_micros = 42\n node_stats.timeline_label = ('[1024B] edge_160_conv2/biases/read from '\n '/job:ps/replica:0/task:3/cpu:0 to '\n '/job:worker/replica:0/task:0/cpu:0')\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format()\n self._validateTrace(ctf)\n\n def testAnalysisAndAllocations(self):\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n config = config_pb2.ConfigProto(device_count={'CPU': 3})\n\n with session.Session(config=config) as sess:\n with ops.device('/cpu:0'):\n num1 = variables.Variable(1.0, name='num1')\n with ops.device('/cpu:1'):\n num2 = variables.Variable(2.0, name='num2')\n with ops.device('/cpu:2'):\n result = num1 + num2 + num1 * num2\n sess.run(variables.global_variables_initializer())\n sess.run(result, options=run_options, run_metadata=run_metadata)\n\n self.assertTrue(run_metadata.HasField('step_stats'))\n tl = timeline.Timeline(run_metadata.step_stats)\n step_analysis = tl.analyze_step_stats()\n ctf = step_analysis.chrome_trace.format_to_string()\n self._validateTrace(ctf)\n maximums = step_analysis.allocator_maximums\n cpuname = 'mklcpu' if test_util.IsMklEnabled() else 'cpu'\n self.assertTrue(cpuname in maximums)\n cpu_max = maximums[\n 'cuda_host_bfc'] if 'cuda_host_bfc' in maximums else maximums[cpuname]\n # At least num1 + num2, both float32s (4 bytes each)\n self.assertGreaterEqual(cpu_max.num_bytes, 8)\n self.assertGreater(cpu_max.timestamp, 0)\n self.assertTrue('num1' in cpu_max.tensors or 'num1/read' in cpu_max.tensors)\n self.assertTrue('num2' in cpu_max.tensors or 'num2/read' in cpu_max.tensors)\n\n def testManyCPUs(self):\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n config = config_pb2.ConfigProto(device_count={'CPU': 3})\n with session.Session(config=config) as sess:\n with ops.device('/cpu:0'):\n num1 = variables.Variable(1.0, 
name='num1')\n with ops.device('/cpu:1'):\n num2 = variables.Variable(2.0, name='num2')\n with ops.device('/cpu:2'):\n result = num1 + num2 + num1 * num2\n sess.run(variables.global_variables_initializer())\n sess.run(result, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField('step_stats'))\n step_stats = run_metadata.step_stats\n devices = [d.device for d in step_stats.dev_stats]\n self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)\n self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:1' in devices)\n self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:2' in devices)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format()\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_dataflow=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(show_memory=False)\n self._validateTrace(ctf)\n tl = timeline.Timeline(step_stats)\n ctf = tl.generate_chrome_trace_format(\n show_memory=False, show_dataflow=False)\n self._validateTrace(ctf)\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.client.timeline.Timeline",
"tensorflow.python.framework.test_util.IsMklEnabled",
"tensorflow.python.ops.variables.Variable",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.global_variables_initializer"
]
] |
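The recurring `_validateTrace` check in the test row above encodes what a minimal Chrome trace looks like: JSON with a top-level `traceEvents` list whose entries each carry a `ph` (phase) field. Here is a standalone version of that check, fed a hand-written sample rather than real TensorFlow output.

```python
# Standalone version of the _validateTrace assertions above; the sample
# trace is hand-written for the demo, not produced by TensorFlow.
import json

def validate_trace(chrome_trace_format: str) -> None:
    trace = json.loads(chrome_trace_format)       # must be valid JSON
    assert 'traceEvents' in trace                 # top-level event list
    for event in trace['traceEvents']:
        assert 'ph' in event                      # every event has a phase

sample = json.dumps({'traceEvents': [
    {'ph': 'X', 'name': 'MatMul', 'ts': 0, 'dur': 42, 'pid': 0}]})
validate_trace(sample)   # passes silently; raises AssertionError otherwise
```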
schlafly/gfa_reduce | [
"9f68a8245a2f1bef3e11901bca7a4c6f45587faa"
] | [
"py/ci_reduce/analysis/djs_maskinterp.py"
] | [
"import numpy as np\nfrom scipy.interpolate import interp1d\nimport astropy.io.fits as fits\nimport time\n\ndef maskinterp1(yval, mask):\n# omitting xval arg (assume regular grid), omitting const kw arg\n# (assume const=True behavior is desired)\n\n yval = np.array(yval)\n\n mask = mask.astype(int)\n\n bad = (mask != 0)\n if np.sum(bad) == 0:\n return yval\n\n good = (mask == 0)\n ngood = np.sum(good)\n if ngood == 0:\n return yval\n \n if np.sum(good) == 1:\n return yval*0 + yval[good][0]\n\n ynew = yval\n ny = len(yval)\n\n igood = (np.where(good))[0]\n ibad = (np.where(bad))[0]\n f = interp1d(igood, yval[igood], kind='linear', fill_value='extrapolate')\n\n yval[bad] = f(ibad)\n\n # do the /const part\n if igood[0] != 0:\n ynew[0:igood[0]] = ynew[igood[0]]\n if igood[ngood-1] != (ny-1):\n ynew[(igood[ngood-1]+1):ny] = ynew[igood[ngood-1]]\n\n return ynew\n\ndef maskinterp(yval, mask, axis):\n\n mask = mask.astype(int)\n\n sh_yval = yval.shape\n sh_mask = mask.shape\n\n assert(len(sh_yval) == 2)\n assert(len(sh_mask) == 2)\n\n assert((sh_yval[0] == sh_mask[0]) and (sh_yval[1] == sh_mask[1]))\n\n assert((axis == 0) or (axis == 1))\n\n wbad = (np.where(mask != 0))\n\n if axis == 0:\n # the y coord values of rows that need some interpolation\n bad_stripe_indices = np.unique(wbad[0])\n else:\n # the x coord values of columns that need some interpolation\n bad_stripe_indices = np.unique(wbad[1])\n\n if len(bad_stripe_indices) == 0:\n return yval\n\n for ind in bad_stripe_indices:\n if axis == 0:\n yval[ind, :] = maskinterp1(yval[ind, :], mask[ind, :])\n else:\n yval[:, ind] = maskinterp1(yval[:, ind], mask[:, ind])\n\n return yval\n\ndef average_bilinear(yval, mask):\n int0 = maskinterp(yval, mask, 0)\n int1 = maskinterp(yval, mask, 1)\n interp = (int0 + int1)/2.0\n\n return interp\n\ndef test(axis=0):\n mask = fits.getdata('/scratch1/scratchdirs/ameisner/dci-03336/dci-03336_bitmask.fits.gz')\n\n im = fits.getdata('/scratch1/scratchdirs/ameisner/dci-03336/dci-03336_reduced.fits.gz')\n\n t0 = time.time()\n interp = maskinterp(im, mask, axis)\n dt = time.time()-t0\n print(dt)\n\n return interp\n"
] | [
[
"numpy.array",
"scipy.interpolate.interp1d",
"numpy.sum",
"numpy.where",
"numpy.unique"
]
] |
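`average_bilinear` in the row above interpolates masked pixels along rows and columns independently, then averages the two passes. The compact re-derivation below uses `numpy.interp`, which also provides the constant end-extrapolation that the row's `/const` branch implements by hand; the `interp_axis` helper is illustrative, not the row's API.

```python
# Tiny demo of the row's approach: per-axis linear interpolation of masked
# pixels (with constant extension at the ends, via numpy.interp's default
# behaviour), then averaging the row pass and the column pass.
import numpy as np

def interp_axis(img, mask, axis):       # illustrative helper, not the row's
    out = np.moveaxis(img.astype(float).copy(), axis, 0)
    m = np.moveaxis(mask.astype(bool), axis, 0)
    for j in range(out.shape[1]):
        bad = m[:, j]
        if bad.any() and (~bad).sum() >= 2:
            good = np.flatnonzero(~bad)
            out[bad, j] = np.interp(np.flatnonzero(bad), good, out[good, j])
    return np.moveaxis(out, 0, axis)

img = np.arange(16, dtype=float).reshape(4, 4)
mask = np.zeros_like(img)
mask[1, 2] = 1                                      # one bad pixel
fixed = (interp_axis(img, mask, 0) + interp_axis(img, mask, 1)) / 2.0
print(fixed[1, 2])   # 6.0, recovered from its row and column neighbours
```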
Cyberpredator21/Test | [
"1025e359f103e2256ded9821f40db7e3d8eb1584"
] | [
"iris.py"
] | [
"import pandas as pd\nimport numpy as np\nimport pickle\n\ndf = pd.read_csv('iris.data')\n\nX = np.array(df.iloc[:, 0:4])\ny = np.array(df.iloc[:, 4:])\n\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nfrom sklearn.svm import SVC\nsv = SVC(kernel='linear').fit(X_train,y_train)\n\n\npickle.dump(sv, open('iri.pkl', 'wb'))\n"
] | [
[
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"sklearn.svm.SVC",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
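A companion sketch that reloads the model pickled by `iris.py` above and scores one sample; the four measurements are arbitrary example values.

```python
import pickle
import numpy as np

with open('iri.pkl', 'rb') as f:
    sv = pickle.load(f)

sample = np.array([[5.1, 3.5, 1.4, 0.2]])  # sepal/petal measurements
print(sv.predict(sample))                  # encoded class label
```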
Adrian609/google-research | [
"88481d10a87947ffb9305dc7665682e008b27391"
] | [
"uncertainties/scripts/train.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Script to launch the algorithms on the last layer uncertainties.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport gin.tf\nimport numpy as np\nimport six\nimport tensorflow.compat.v1 as tf\n\n\nimport uncertainties.sources.cifar.cifar_input_python as cifar_input\nimport uncertainties.sources.mnist.mnist_input as mnist_input\nimport uncertainties.sources.models.bootstrap as bootstrap\nimport uncertainties.sources.models.dropout as dropout\nimport uncertainties.sources.models.precond as precond\nimport uncertainties.sources.models.simple as simple\nimport uncertainties.sources.postprocessing.metrics as metrics\nimport uncertainties.sources.postprocessing.postprocess as postprocess\nimport uncertainties.sources.utils.util as util\n\n\nFLAGS = flags.FLAGS\n\n\nflags.DEFINE_string('baseroute', None, 'Directory for inputs.')\nflags.DEFINE_string('workdir', None, 'Directory for outputs.')\nflags.DEFINE_string('dataset', None,\n 'Name of the dataset {cifar10, cifar100, mnist}')\nflags.DEFINE_string('algorithm', None,\n 'Name of the algorithm'\n '{simple, precond, dropout, bootstrap}')\nflags.DEFINE_multi_string('gin_config', [],\n 'List of paths to the config files.')\n\nflags.DEFINE_multi_string('gin_bindings', [],\n 'Newline separated list of Gin parameter bindings.')\n\n\ndef main(unused_argv):\n\n FLAGS.gin_bindings = [\n x if \"@\" not in x else six.ensure_str(x).replace(\"\\\"\", \"\") # pylint: disable=g-inconsistent-quotes\n for x in FLAGS.gin_bindings\n ]\n gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_bindings)\n if FLAGS.dataset == 'mnist':\n train_mnist()\n elif FLAGS.dataset == 'cifar10':\n train_cifar10()\n elif FLAGS.dataset == 'cifar100':\n train_cifar100()\n else:\n raise NotImplementedError('Dataset not implemented')\n\n\[email protected]('train_mnist')\ndef train_mnist(features_mnist_train=gin.REQUIRED,\n features_mnist_test=gin.REQUIRED,\n data_path_mnist=gin.REQUIRED,\n features_notmnist_test=gin.REQUIRED,\n labels_notmnist_test=gin.REQUIRED,\n model_dir=gin.REQUIRED,\n dim_input=gin.REQUIRED,\n num_classes=gin.REQUIRED\n ):\n \"\"\"Training function.\"\"\"\n # Define the paths\n data_path_mnist = os.path.join(FLAGS.baseroute, data_path_mnist)\n features_mnist_train = os.path.join(FLAGS.baseroute, features_mnist_train)\n features_mnist_test = os.path.join(FLAGS.baseroute, features_mnist_test)\n features_notmnist_test = os.path.join(FLAGS.baseroute, features_notmnist_test)\n labels_notmnist_test = os.path.join(FLAGS.baseroute, labels_notmnist_test)\n model_dir = os.path.join(FLAGS.baseroute, model_dir)\n # Load the features for mnist\n (_, y_train), (_, y_test) = mnist_input.load_data(data_path_mnist)\n with tf.gfile.Open(features_mnist_train, 'r') as f:\n x_train = 
np.load(f)\n with tf.gfile.Open(features_mnist_test, 'r') as f:\n x_test = np.load(f)\n dataset = (x_train, y_train), (x_test, y_test)\n\n model = build_model(dataset, model_dir, dim_input, num_classes)\n\n # Load notmnist features and labels test dataset\n with tf.gfile.Open(features_notmnist_test, 'r') as f:\n x_notmnist = np.load(f)\n with tf.gfile.Open(labels_notmnist_test, 'r') as f:\n y_notmnist = np.load(f)\n y_notmnist = tf.keras.utils.to_categorical(y_notmnist, num_classes=10)\n\n # Compute output probabilities on x_test and x_test_notmnist\n # Warning: take time, pass over all the saved weights.\n x = np.vstack((x_test, x_notmnist))\n model.predict(x)\n\n # Dictionary for y - Metrics\n y_dic = {'mnist': y_test, 'notmnist': y_notmnist}\n\n # Postprocessing and metrics\n if FLAGS.algorithm in ['simple', 'dropout', 'precond']:\n postprocess.postprocess_mnist(FLAGS.workdir)\n for dataset_str in ['mnist', 'notmnist']:\n path_postprocess = os.path.join(FLAGS.workdir, dataset_str)\n metrics.Metrics(y_dic[dataset_str], path_postprocess)\n\n # Write the gin config in the working directory\n util.save_gin(os.path.join(FLAGS.workdir, 'gin_configuration.txt'))\n util.write_gin(FLAGS.workdir)\n\n\[email protected]('train_cifar10')\ndef train_cifar10(features_cifar10_train=gin.REQUIRED,\n features_cifar10_test=gin.REQUIRED,\n data_path_cifar10=gin.REQUIRED,\n features_cifar10_train_distorted=gin.REQUIRED,\n distorted=gin.REQUIRED,\n model_dir=gin.REQUIRED,\n dim_input=gin.REQUIRED,\n num_classes=gin.REQUIRED\n ):\n \"\"\"Training function.\"\"\"\n # Define the paths\n features_cifar10_train = os.path.join(FLAGS.baseroute, features_cifar10_train)\n features_cifar10_test = os.path.join(FLAGS.baseroute, features_cifar10_test)\n data_path_cifar10 = os.path.join(FLAGS.baseroute, data_path_cifar10)\n features_cifar10_train_distorted = os.path.join(\n FLAGS.baseroute, features_cifar10_train_distorted)\n model_dir = os.path.join(FLAGS.baseroute, model_dir)\n # Load the features for cifar10\n (_, y_train), (_, y_test) = cifar_input.load_data(distorted,\n data_path_cifar10,\n 'cifar10')\n if distorted:\n with tf.gfile.Open(features_cifar10_train_distorted, 'r') as f:\n x_train = np.load(f)\n else:\n with tf.gfile.Open(features_cifar10_train, 'r') as f:\n x_train = np.load(f)\n with tf.gfile.Open(features_cifar10_test, 'r') as f:\n x_test = np.load(f)\n dataset = (x_train, y_train), (x_test, y_test)\n\n model = build_model(dataset, model_dir, dim_input, num_classes)\n\n # Compute output probabilities on x_test\n # Warning: take time, pass over all the saved weights.\n model.predict(x_test)\n\n # Postprocessing and metrics\n if FLAGS.algorithm in ['simple', 'dropout', 'precond']:\n postprocess.postprocess_cifar(FLAGS.workdir, 'cifar10')\n path_postprocess = os.path.join(FLAGS.workdir, 'cifar10')\n metrics.Metrics(y_test, path_postprocess)\n\n # Write the gin config in the working directory\n util.save_gin(os.path.join(FLAGS.workdir, 'gin_configuration.txt'))\n util.write_gin(FLAGS.workdir)\n\n\[email protected]('train_cifar100')\ndef train_cifar100(features_cifar100_train=gin.REQUIRED,\n features_cifar100_test=gin.REQUIRED,\n data_path_cifar100=gin.REQUIRED,\n features_cifar100_train_distorted=gin.REQUIRED,\n distorted=gin.REQUIRED,\n model_dir=gin.REQUIRED,\n dim_input=gin.REQUIRED,\n num_classes=gin.REQUIRED\n ):\n \"\"\"Training function.\"\"\"\n # Define the paths\n features_cifar100_train = os.path.join(FLAGS.baseroute,\n features_cifar100_train)\n features_cifar100_test = 
os.path.join(FLAGS.baseroute, features_cifar100_test)\n data_path_cifar100 = os.path.join(FLAGS.baseroute, data_path_cifar100)\n features_cifar100_train_distorted = os.path.join(\n FLAGS.baseroute, features_cifar100_train_distorted)\n model_dir = os.path.join(FLAGS.baseroute, model_dir)\n # Load the features for cifar100\n (_, y_train), (_, y_test) = cifar_input.load_data(distorted,\n data_path_cifar100,\n 'cifar100')\n if distorted:\n with tf.gfile.Open(features_cifar100_train_distorted, 'r') as f:\n x_train = np.load(f)\n else:\n with tf.gfile.Open(features_cifar100_train, 'r') as f:\n x_train = np.load(f)\n with tf.gfile.Open(features_cifar100_test, 'r') as f:\n x_test = np.load(f)\n dataset = (x_train, y_train), (x_test, y_test)\n\n model = build_model(dataset, model_dir, dim_input, num_classes)\n\n # Compute output probabilities on x_test\n # Warning: take time, pass over all the saved weights.\n model.predict(x_test)\n\n # Postprocessing and metrics\n if FLAGS.algorithm in ['simple', 'dropout', 'precond']:\n postprocess.postprocess_cifar(FLAGS.workdir, 'cifar100')\n path_postprocess = os.path.join(FLAGS.workdir, 'cifar100')\n metrics.Metrics(y_test, path_postprocess)\n\n # Write the gin config in the working directory\n util.save_gin(os.path.join(FLAGS.workdir, 'gin_configuration.txt'))\n util.write_gin(FLAGS.workdir)\n\n\ndef build_model(dataset, model_dir, dim_input, num_classes):\n \"\"\"Create the Bayesian Neural Network and sample from it.\n\n Args:\n dataset: dataset\n model_dir: directory of the model to load the weights of the pretrained\n neural network\n dim_input: dimension of the input vector\n num_classes: number of classes\n Returns:\n model: model\n \"\"\"\n if FLAGS.algorithm == 'simple':\n model = simple.LastLayerBayesian(dataset, FLAGS.workdir, model_dir,\n dim_input, num_classes)\n elif FLAGS.algorithm == 'precond':\n model = precond.LastLayerBayesianPrecond(dataset, FLAGS.workdir, model_dir,\n dim_input, num_classes)\n elif FLAGS.algorithm == 'dropout':\n model = dropout.LastLayerDropout(dataset, FLAGS.workdir, model_dir,\n dim_input, num_classes)\n elif FLAGS.algorithm == 'bootstrap':\n model = bootstrap.LastLayerBootstrap(dataset, FLAGS.workdir, model_dir,\n dim_input, num_classes)\n else:\n raise NotImplementedError('Algorithm not implemented')\n _, _, sampled_weights = model.sample()\n\n # Saving the weights\n if FLAGS.algorithm in ['simple', 'precond']:\n str_file = 'sampled_weights_' + six.ensure_str(model.sampler) + '.npy'\n elif FLAGS.algorithm == 'bootstrap':\n str_file = 'sampled_weights_w{}.npy'.format(model.worker_id)\n else:\n str_file = 'sampled_weights.npy'\n data_path = os.path.join(FLAGS.workdir, str_file)\n with tf.gfile.Open(data_path, 'wb') as f:\n np.save(f, sampled_weights)\n\n return model\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.v1.gfile.Open",
"numpy.load",
"numpy.save",
"tensorflow.compat.v1.keras.utils.to_categorical",
"numpy.vstack"
]
] |
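The loading pattern that `train.py` above repeats for every feature file, isolated: `np.load` through `tf.gfile` so local and GCS paths behave the same, plus the one-hot step. `'features.npy'` is a placeholder path, and `'rb'` is used here since `np.load` expects a binary handle.

```python
import numpy as np
import tensorflow.compat.v1 as tf

with tf.gfile.Open('features.npy', 'rb') as f:
    x_train = np.load(f)

y = tf.keras.utils.to_categorical(np.array([0, 3, 9]), num_classes=10)
print(x_train.shape, y.shape)
```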
Sharad24/ignite | [
"0de7156bb284bd01d788252469a3b386f10abbd7"
] | [
"tests/ignite/metrics/test_frequency.py"
] | [
"import time\n\nimport pytest\n\nimport torch.distributed as dist\n\nfrom ignite.engine import Engine, Events\nfrom ignite.metrics import Frequency\n\n\ndef test_nondistributed_average():\n artificial_time = 1 # seconds\n num_tokens = 100\n average_upper_bound = num_tokens / artificial_time\n average_lower_bound = average_upper_bound * 0.9\n freq_metric = Frequency()\n freq_metric.reset()\n time.sleep(artificial_time)\n freq_metric.update(num_tokens)\n average = freq_metric.compute()\n assert average_lower_bound < average < average_upper_bound\n\n\ndef _test_frequency_with_engine(device, workers, every=1):\n\n artificial_time = 0.1 / workers # seconds\n total_tokens = 1200 // workers\n batch_size = 128 // workers\n\n estimated_wps = batch_size * workers / artificial_time\n\n def update_fn(engine, batch):\n time.sleep(artificial_time)\n return {\"ntokens\": len(batch)}\n\n engine = Engine(update_fn)\n wps_metric = Frequency(output_transform=lambda x: x[\"ntokens\"], device=device)\n event = Events.ITERATION_COMPLETED(every=every)\n wps_metric.attach(engine, \"wps\", event_name=event)\n\n @engine.on(event)\n def assert_wps(e):\n wps = e.state.metrics[\"wps\"]\n assert estimated_wps * 0.80 < wps < estimated_wps, \"{}: {} < {} < {}\".format(\n e.state.iteration, estimated_wps * 0.80, wps, estimated_wps\n )\n\n data = [[i] * batch_size for i in range(0, total_tokens, batch_size)]\n engine.run(data, max_epochs=1)\n\n\ndef test_frequency_with_engine():\n device = \"cpu\"\n _test_frequency_with_engine(device, workers=1)\n\n\[email protected]\ndef test_frequency_with_engine_distributed(distributed_context_single_node_gloo):\n device = \"cpu\"\n _test_frequency_with_engine(device, workers=dist.get_world_size())\n\n\ndef test_frequency_with_engine_with_every():\n device = \"cpu\"\n _test_frequency_with_engine(device, workers=1, every=1)\n _test_frequency_with_engine(device, workers=1, every=2)\n _test_frequency_with_engine(device, workers=1, every=10)\n\n\[email protected]\ndef test_frequency_with_engine_distributed_with_every(distributed_context_single_node_gloo):\n device = \"cpu\"\n _test_frequency_with_engine(device, workers=dist.get_world_size(), every=1)\n _test_frequency_with_engine(device, workers=dist.get_world_size(), every=2)\n _test_frequency_with_engine(device, workers=dist.get_world_size(), every=10)\n"
] | [
[
"torch.distributed.get_world_size"
]
] |
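The non-distributed test above exercises `Frequency` directly; the same reset/update/compute cycle stands alone as:

```python
import time
from ignite.metrics import Frequency

freq = Frequency()
freq.reset()
time.sleep(1.0)
freq.update(100)       # 100 tokens processed in ~1 s
print(freq.compute())  # slightly under 100 tokens/s
```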
lilchurro/PoseidonML | [
"22a7e91ebb4c747831b6fdb840a13bf0ad1e3c15"
] | [
"networkml/utils/common.py"
] | [
"import ast\nimport logging\nimport os\n\nimport numpy as np\nimport pika\nfrom redis import StrictRedis\n\n\nclass Common:\n \"\"\"\n Common functions that are shared across models\n \"\"\"\n\n def __init__(self, config=None):\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n self.r = None\n self.logger = self.setup_logger(self.logger)\n self.setup_env()\n\n if config:\n try:\n self.time_const = config['time constant']\n self.state_size = config['state size']\n self.look_time = config['look time']\n self.threshold = config['threshold']\n self.rnn_size = config['rnn size']\n self.conf_labels = config['conf labels']\n except Exception as e: # pragma: no cover\n self.logger.error(\n 'Unable to read config properly because: %s', str(e))\n\n redis_host = 'redis'\n if 'REDIS_HOST' in os.environ and os.environ['REDIS_HOST'] != '':\n redis_host = os.environ['REDIS_HOST']\n self.connect_redis(host=redis_host)\n\n @staticmethod\n def setup_logger(logger):\n try:\n if 'LOG_LEVEL' in os.environ and os.environ['LOG_LEVEL'] != '':\n logger.setLevel(os.environ['LOG_LEVEL'])\n except Exception as e: # pragma: no cover\n logger.error(\n 'Unable to set logging level because: {0}, defaulting to INFO.'.format(str(e)))\n return logger\n\n def setup_env(self):\n # Get \"RABBIT\" environment variable with a default value of false\n self.use_rabbit = os.getenv('RABBIT', 'False')\n\n # Convert our string into a boolean\n self.use_rabbit = self.use_rabbit.lower() in ['true', 't', 'y', '1']\n self.logger.debug('RABBIT flag set to: %s', str(self.use_rabbit))\n return\n\n def connect_redis(self, host='redis', port=6379, db=0):\n self.r = None\n try:\n self.r = StrictRedis(host=host, port=port, db=db,\n socket_connect_timeout=2)\n except Exception as e: # pragma: no cover\n self.logger.error(\n 'Failed connect to Redis because: {0}'.format(str(e)))\n return\n\n def connect_rabbit(self):\n # Rabbit settings\n self.connection = None\n self.exchange = 'topic-poseidon-internal'\n self.exchange_type = 'topic'\n\n try:\n # Starting rabbit connection\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='rabbit')\n )\n except Exception as e: # pragma: no cover\n self.logger.error(\n 'Failed to open RabbitMQ connection because: {0}'.format(str(e)))\n return\n\n self.channel = self.connection.channel()\n self.channel.exchange_declare(\n exchange=self.exchange, exchange_type=self.exchange_type\n )\n\n self.routing_key = 'poseidon.algos.decider'\n self.logger.debug('Routing key: ' + self.routing_key)\n self.logger.debug('Exchange: ' + self.exchange)\n\n return\n\n def get_address_info(self, address, timestamp):\n '''\n Look up address information prior to the timestamp\n '''\n # Get the timestamps of the past updates for this address\n try:\n updates = self.r.hgetall(address)\n timestamps = ast.literal_eval(\n updates[b'timestamps'].decode('ascii'))\n except Exception as e: # pragma: no cover\n self.logger.debug(\n 'No timestamp found because: {0}, setting to None'.format(str(e)))\n timestamps = None\n\n # If there is a previous update, read out the state\n last_update = None\n if timestamps is not None:\n # Get the most recent update prior to the current timestsmp\n updates = [time for time in timestamps if time < timestamp]\n if len(updates) > 0:\n last_update = max(updates)\n\n return last_update\n\n def get_previous_state(self, source_mac, timestamp):\n '''\n Gets the average representation vector from the most recent update\n before the current timestamp\n\n 
Args:\n source_mac: MAC address to read the representation from\n timestamp: Current timestamp. Get only representations before this\n\n Returns:\n last_update: Timestamp of last update\n previous_representation: Average representation at last update\n '''\n\n # Try to read the old updates, if there are none return Nones\n try:\n updates = self.r.hgetall(source_mac)\n except Exception as e: # pragma: no cover\n self.logger.warning(\n 'Unable to read old updates because: {0}, defaulting to None'.format(str(e)))\n return None, None\n\n # Get the most recent prior timestamp from the update list\n try:\n update_list = ast.literal_eval(\n updates[b'timestamps'].decode('ascii'))\n except Exception as e: # pragma: no cover\n self.logger.debug(\n 'Empty update list because: {0} key not found'.format(str(e)))\n update_list = []\n last_update = None\n for update in update_list:\n if update < timestamp.timestamp():\n last_update = update\n\n # Return Nones if there is no earlier update\n if last_update is None:\n return None, None\n\n # Read the last updated entry to get the previous representation\n key = source_mac + '_' + str(last_update)\n previous_representation = None\n try:\n state = self.r.hgetall(key)\n previous_representation = ast.literal_eval(\n state[b'representation'].decode('ascii'))\n except Exception as e: # pragma: no cover\n self.logger.error(\n 'Failed to get previous representation because: {0}'.format(str(e)))\n return None, None\n return last_update, previous_representation\n\n def average_representation(\n self,\n representations,\n timestamps,\n prev_representation=None,\n last_update=None,\n ):\n '''\n Computes the new representation from the old ones if they are given.\n If they are not, compute the EMA of the new observations\n\n Args:\n representations: New representations\n timestamps: Times that the new representations were seen\n prev_representation: The representation computed at last_update\n last_update: Time of previous representation update\n\n Returns:\n new_timestamp: Timestamp of the new representations\n new_representation: Newly computed representations\n '''\n\n # If there is no previous representation, default to zeros\n if prev_representation is None:\n last_update = None\n prev_representation = np.zeros(representations.shape[1])\n\n prev_time = last_update\n representation = prev_representation\n for i, rep in enumerate(representations):\n time = timestamps[i].timestamp()\n # If there was no previous update the representation is set equal to\n # the current representaton\n if prev_time is None:\n representation = rep\n prev_time = time\n # If the time of the representation is after the previous update,\n # compute the exponentially weighted moving average.\n elif time > prev_time:\n time_diff = time - prev_time\n alpha = 1 - np.exp(-time_diff/self.time_const)\n representation += alpha*(rep - representation)\n prev_time = time\n\n return time, representation\n\n def update_data(\n self,\n source_mac,\n representations,\n timestamps,\n predictions,\n other_ips,\n model_hash\n ):\n '''\n Updates the stored data with the new information\n\n Args:\n source_mac: Address of the representaion to update\n representations: New observations of representations\n timestamps: Time at which each representation was observed\n predictions: Model predictions along with confidences\n other_ips: Other IP addresses the source has communicated with\n model_hash: Hash of the model used to compute this information\n '''\n # Get the previous update time and average 
representation\n last_update, prev_rep = self.get_previous_state(\n source_mac, timestamps[0])\n\n # Compute current representation\n time, current_rep = self.average_representation(\n representations, timestamps)\n\n # Compute moving average representation\n time, avg_rep = self.average_representation(\n representations,\n timestamps,\n prev_representation=prev_rep,\n last_update=last_update\n )\n\n # Separate labels and confidences\n labels = [label for label, confidence in predictions]\n confidences = [confidence for label, confidence in predictions]\n\n # Create the information to store\n key = source_mac + '_' + str(time)\n state = {\n 'representation': list(avg_rep),\n 'current_representation': list(current_rep),\n 'labels': labels,\n 'confidences': confidences,\n 'other_ips': sorted(other_ips),\n 'model_hash': model_hash\n }\n\n self.logger.debug('created key %s', key)\n self.logger.debug(state)\n redis_state = {}\n for k in state:\n redis_state[k] = str(state[k])\n try:\n self.logger.debug('Storing data')\n self.r.hmset(key, redis_state)\n self.r.sadd('mac_addresses', source_mac)\n self.logger.debug('Storing update time')\n # Add this update time to the list of updates\n updates = self.r.hgetall(source_mac)\n update_list = ast.literal_eval(\n updates[b'timestamps'].decode('ascii'))\n self.logger.debug(\n 'Got previous updates from {0}'.format(source_mac))\n except Exception as e: # pragma: no cover\n self.logger.debug(\n 'No previous updates found for {0} because: {1}'.format(source_mac, str(e)))\n update_list = []\n\n update_list.append(time)\n update_list = sorted(update_list)\n times = {'timestamps': update_list}\n self.logger.debug('Updating %s', source_mac)\n self.logger.debug(times)\n redis_times = {}\n for k in times:\n redis_times[k] = str(times[k])\n try:\n self.r.hmset(source_mac, redis_times)\n self.r.sadd('mac_addresses', source_mac)\n except (ConnectionError, TimeoutError) as e: # pragma: no cover\n self.logger.debug(\n 'Could not store update time because: %s', str(e))\n\n return key\n\n def basic_decision(\n self,\n key,\n address,\n prev_time,\n timestamp,\n labels,\n confs,\n abnormality\n ):\n\n valid = True\n\n if labels is None:\n labels = ['Unknown']*3\n confs = [1, 0, 0]\n valid = False\n\n if key is None:\n key = address\n valid = False\n\n investigate = False\n if prev_time is not None and timestamp - prev_time > self.look_time:\n investigate = True\n if labels[0] == 'Unknown':\n investigate = True\n\n behavior = 'normal'\n if abnormality > self.threshold:\n behavior = 'abnormal'\n\n output = {}\n decisions = {'behavior': behavior, 'investigate': investigate}\n classifications = {'labels': labels[0:3], 'confidences': confs[0:3]}\n id_dict = {\n 'decisions': decisions,\n 'classification': classifications,\n 'timestamp': timestamp,\n 'valid': valid\n }\n output[key] = id_dict\n return output\n"
] | [
[
"numpy.exp",
"numpy.zeros"
]
] |
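The exponentially weighted moving-average update at the heart of `Common.average_representation` above, on toy numbers (`time_const` and the vectors are illustrative):

```python
import numpy as np

time_const = 60.0
representation = np.zeros(4)          # previous average representation
rep = np.array([1.0, 2.0, 3.0, 4.0])  # new observation
time_diff = 30.0                      # seconds since the last update

alpha = 1 - np.exp(-time_diff / time_const)
representation += alpha * (rep - representation)
print(representation)  # partway from the old average toward rep
```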
geraldomacias/MarkLogic | [
"996c48a970a24aa7e5af4752fe9c12b63d4834fe"
] | [
"services/machine_learning/project/linearSVC.py"
] | [
"# Db stuff\nfrom project import db\nfrom project.api.models import decode_auth_token, MLStatus\n\n# Machine learning\nimport sys\nimport json\nimport pandas as pd\nimport requests\nfrom sklearn import preprocessing\nfrom collections import defaultdict\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import HashingVectorizer\n\n\"\"\"\n jsonInput format\n { <file_name1> : [col_name1, col_name2, ....], }\n\"\"\"\ndef matchSport(jsonInput, auth_token, app):\n current_classification = classifier(jsonInput, auth_token, app)\n data = current_classification.get_data()\n vectorizer = current_classification.get_vectorizer(data)\n trainer = current_classification.train_classifier(vectorizer, data)\n df = current_classification.create_user_dataframe(jsonInput)\n results = current_classification.calculate_results(trainer)\n (predicted_sport, confidence) = current_classification.get_predicted_sport(results)\n cwd = current_classification.get_current_working_directory(auth_token, app)\n file = current_classification.get_uploaded_file(cwd)\n json_frame = current_classification.append_classified_field(predicted_sport, confidence, file)\n filepath = current_classification.save_classified_file(cwd, json_frame, auth_token, app)\n current_classification.update_endpoints(filepath, auth_token, app, json_frame)\n\n\n\n\n# Moved function logic to a class so the current_classification\n# object can be testable\n# Welc: Break out method object\nclass classifier:\n # Python contrusctor\n def __init__(self, jsonInput, auth_token, app):\n self.jsonInput = jsonInput\n self.auth_token = auth_token\n self.app = app\n\n def get_data(self):\n # Load data.json\n with open('project/data2.json') as f:\n data = json.load(f)\n\n # Convert json objects into python matrix\n X = []\n Y = []\n for element in data:\n for key in element.keys():\n X.append(key)\n for value in element.values():\n Y.append(value)\n\n # Reshape the dataframe to be ['sport', 'col_name']\n data = pd.DataFrame(X, columns=['sport'])\n data['col'] = Y\n data['cop'] = data.sport\n return data\n\n def get_vectorizer(self, data):\n return HashingVectorizer(n_features=2**4)\n\n\n def train_classifier(self, vectorizer, data):\n # Hashing Vectorizer\n vec_data = vectorizer.fit_transform(data.col)\n self.vec_data = vec_data\n\n # Set the desired output into a separate dataframe\n target = data.sport\n\n # Split data set into train and test sets\n data_train, data_test, target_train, target_test = \\\n train_test_split(\\\n vec_data, target,\\\n test_size = 0.30,\\\n random_state = 3)\n\n # Create an object of the type LinearSVC\n svc_model = LinearSVC(random_state=0)\n\n # Train the algorithm on training data\n # Returns trainer\n return svc_model.fit(data_train, target_train)\n\n\n\n def create_user_dataframe(self, jsonInput):\n # Place the json_input into a dataframe df\n # Create the shape and data for the df\n X = []\n Y = []\n json_in = json.loads(jsonInput)\n for key in json_in:\n X.append(key)\n for value in json_in[key]:\n Y.append(value)\n\n self.X = X\n # Create the dataframe df and places Y-values\n return pd.DataFrame(Y, columns=['col'])\n\n\n\n def calculate_results(self, trainer):\n # Store the results\n return trainer.predict(self.vec_data)\n\n\n\n def get_predicted_sport(self, results):\n # Sum all the predictions\n counts = {}\n for res in results:\n if res in counts:\n counts[res] += 1\n else:\n counts[res] = 0\n\n # Get the sum, max, and confidence get_values\n # for 
the predicted results\n summation = sum(counts.values())\n maximum = max(counts.values())\n if maximum == 0:\n confidence = 0\n else:\n confidence = maximum / summation * 100\n\n # Get the predicted sport\n predicted_sport = max(counts, key=counts.get)\n\n # Get the max prediction occurance\n return (predicted_sport, confidence)\n\n\n def get_current_working_directory(self, auth_token, app):\n # get the current working directory\n return get_cwd(auth_token, app)\n\n\n def get_uploaded_file(self, cwd):\n # Get the file location\n return cwd + '/' + self.X[0]\n\n\n def append_classified_field(self, predicted_sport, confidence, df_file):\n # Load the csv file into a pandas dataframe\n df = pd.read_csv(df_file).dropna()\n\n # append a sport column with the predicted sport\n df['predicted_sport'] = predicted_sport\n\n # append the confidence level with the predicted\n df['prediction_confidence%'] = confidence\n\n # Formatting for player cards\n rows = df.shape[0]\n columns = df.columns\n spec_cols = []\n players = []\n\n # Get all special int64 column names\n for col in columns:\n if df[col].dtype == 'int64':\n spec_cols.append(col)\n\n # Make a dictionary which contains all players\n # For each player\n for i in range(rows):\n player = {}\n # For each field\n for col in columns:\n # Typecast int64 to int\n if col in spec_cols:\n player[col] = int(df.iloc[i][col])\n else:\n player[col] = df.iloc[i][col]\n players.append(player)\n\n # save the dataframe into a json object\n return json.dumps(players)\n\n\n def save_classified_file(self, cwd, json_frame, auth_token, app):\n # Save the classified file to the cwd\n selected_files = get_values(auth_token, app)\n\n # Build name from selected_files\n filepath = cwd + '/' + 'classified'\n for file_name in selected_files:\n filepath = filepath + '_' + file_name\n filepath = filepath + '.json'\n\n with open(filepath, 'w+') as json_file:\n json.dump(json_frame, json_file)\n return filepath\n\n\n def update_endpoints(self, filepath, auth_token, app, json_frame):\n # Open the saved file\n files = {'classifed': open(filepath, 'rb')}\n\n # Get selected files from db\n selected_files = get_values(auth_token, app)\n\n # Can loop to produce multiple files\n values = {'file1': selected_files[0]}\n\n # Hit s3/uploadClassified\n jake_point(files, values, auth_token)\n\n # update the status with the json_frame\n update_status(auth_token, app, json_frame)\n\n\n\ndef jake_point(files, values, auth_token):\n g_headers = {'Authorization': 'Bearer ' + auth_token}\n url = 'http://file_system:5000/s3/uploadClassified'\n r = requests.post(url, files=files, data=values, headers=g_headers)\n\n\n# Retrieves selected files from the db\ndef get_values(auth_token, app):\n with app.app_context():\n resp = decode_auth_token(auth_token)\n if isinstance(resp, str):\n # Auth token invalid\n print(resp, flush=True)\n # Is a user_id\n row = MLStatus.query.filter_by(user_id=resp).first()\n if not row:\n # Cannot find use in the status DB. No bueno\n print('Cannot find user in the status DB')\n # Return the selected files from the user\n return row.selected_files;\n\n# Retrieves current working directory from the db\ndef get_cwd(auth_token, app):\n with app.app_context():\n resp = decode_auth_token(auth_token)\n if isinstance(resp, str):\n # Auth token invalid\n print(resp)\n # Is a user_id\n row = MLStatus.query.filter_by(user_id=resp).first()\n if not row:\n # Cannot find use in the status DB. 
No bueno\n print('Cannot find user in the status DB')\n # Return the location of the original file\n return row.working_directory;\n\n\n# Updates the users status to Complete.\ndef update_status(auth_token, app, j_frame):\n with app.app_context():\n resp = decode_auth_token(auth_token)\n if isinstance(resp, str):\n # Auth token invalid\n print(resp)\n # Is a user_id\n row = MLStatus.query.filter_by(user_id=resp).first()\n if not row:\n # Cannot find use in the status DB. No bueno\n print('Cannot find user in the status DB')\n\n # update the user status\n row.status = 'Completed.'\n # Update the status field\n row.classified_json = j_frame\n # send to db\n db.session.commit()\n"
] | [
[
"pandas.DataFrame",
"sklearn.feature_extraction.text.HashingVectorizer",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"sklearn.svm.LinearSVC"
]
] |
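The classification core of `linearSVC.py` above, reduced to a self-contained sketch: hash column names into fixed-width features, then fit a `LinearSVC`. The four (sport, column) rows are invented stand-ins for `data2.json`.

```python
import pandas as pd
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.svm import LinearSVC

data = pd.DataFrame({'sport': ['nba', 'nba', 'nfl', 'nfl'],
                     'col': ['points', 'rebounds', 'touchdowns', 'yards']})

vec = HashingVectorizer(n_features=2 ** 4)  # stateless hashing, no vocabulary
X = vec.fit_transform(data.col)

model = LinearSVC(random_state=0).fit(X, data.sport)
print(model.predict(vec.transform(['field goals'])))
```

Note that when tallying predictions, `get_predicted_sport` above initializes a class's first occurrence to 0 rather than 1, which undercounts each class by one and slightly deflates the reported confidence.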
mrdrozdov/allRank | [
"6cc5675d467f4fc8bfb5beba92d9c052ff725346"
] | [
"allrank/main.py"
] | [
"from urllib.parse import urlparse\nimport hashlib\nimport collections\n\nfrom tqdm import tqdm\n\nimport allrank.models.losses as losses\nimport numpy as np\nimport os\nimport torch\nfrom allrank.config import Config\nfrom allrank.data.dataset_loading import load_libsvm_dataset, create_data_loaders\nfrom allrank.models.model import make_model\nfrom allrank.models.model_utils import get_torch_device, CustomDataParallel\nfrom allrank.training.train_utils import fit\nfrom allrank.utils.command_executor import execute_command\nfrom allrank.utils.experiments import dump_experiment_result, assert_expected_metrics\nfrom allrank.utils.file_utils import create_output_dirs, PathsContainer, copy_local_to_gs\nfrom allrank.utils.ltr_logging import init_logger\nfrom allrank.utils.python_utils import dummy_context_mgr\nfrom argparse import ArgumentParser, Namespace\nfrom attr import asdict\nfrom functools import partial\nfrom pprint import pformat\nfrom torch import optim\nimport scipy\n\n\ndef parse_args() -> Namespace:\n parser = ArgumentParser(\"allRank\")\n parser.add_argument(\"--job-dir\", help=\"Base output path for all experiments\", required=True)\n parser.add_argument(\"--run-id\", help=\"Name of this run to be recorded (must be unique within output dir)\",\n required=True)\n parser.add_argument(\"--config-file-name\", required=True, type=str, help=\"Name of json file with config\")\n\n return parser.parse_args()\n\n\nclass Dstore:\n def __init__(self, path, dstore_size=None, vec_size=None, enabled=False, load_in_collate=False, load_in_main_loop=False, main_loop_batch=None, load_xb=False, prefetch=False,\n q_path=None, q_dstore_size=None, vocab=None, init_from_fasttext=False, fasttext_path=None):\n self.path = path\n self.dstore_size = dstore_size\n self.q_path = q_path\n self.q_dstore_size = q_dstore_size\n self.vec_size = vec_size\n self.enabled = enabled\n self.load_xb = load_xb\n self.load_in_main_loop= load_in_main_loop\n self.main_loop_batch = main_loop_batch\n self.prefetch = prefetch\n self._initialized = False\n if vocab is not None:\n self.vocab = Dictionary()\n self.vocab.add_from_file(vocab)\n self.vocab.finalize()\n print('Found vocab with size {} at path {}'.format(len(self.vocab), vocab))\n self.init_from_fasttext = init_from_fasttext\n self.fasttext_path = fasttext_path\n self.initialize()\n\n def initialize(self):\n self.keys = np.memmap(os.path.join(self.path, 'dstore_keys.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, self.vec_size))\n self.q_keys = np.memmap(os.path.join(self.q_path, 'dstore_keys.npy'), dtype=np.float32, mode='r', shape=(self.q_dstore_size, self.vec_size))\n self._initialized = True\n\n def get_n_features(self, n_features, config):\n if self.enabled:\n n_features -= 1 # x_id (key id)\n n_features -= 1 # q_src (query src)\n n_features -= 1 # p\n n_features -= 1 # dist\n n_features -= 1 # q_tgt (query tgt)\n n_features -= 1 # x_tgt (key tgt)\n if not config.model.fc_model['ignore_q_feat']:\n n_features += self.vec_size # query vector\n if not config.model.fc_model['ignore_x_feat']:\n n_features += self.vec_size # key vector\n if config.model.fc_model['embed_q_src']:\n n_features += config.model.fc_model['embed_size']\n if config.model.fc_model['embed_x_tgt']:\n n_features += config.model.fc_model['embed_size']\n return n_features\n\n def load_fetch(self, path, keys, index):\n m = hashlib.sha256()\n for x in index:\n m.update(str.encode('{}'.format(x)))\n data_hash = m.hexdigest()\n\n cache_path = os.path.join(path, 
'{}.cache.v0_0_1.npy'.format(data_hash))\n\n print('cache source shape {}'.format(keys.shape))\n\n shape = (len(index), self.vec_size)\n if not os.path.exists(cache_path):\n shape = (len(index), self.vec_size)\n print('build cache shape = {}, and save to {}'.format(shape, cache_path))\n cache = np.memmap(cache_path, mode='w+', dtype=np.float32, shape=shape)\n bsz = 1000\n nbatches = len(index) // bsz\n if nbatches * bsz < len(index):\n nbatches += 1\n for i in tqdm(range(nbatches)):\n start = i * bsz\n end = min(start + bsz, len(index))\n local_index = index[start:end]\n cache[start:end] = keys[local_index]\n del cache\n print('read cache from {}'.format(cache_path))\n cache = np.memmap(cache_path, mode='r', dtype=np.float32, shape=shape)\n return cache, index\n\n def run_prefetch(self, dl_lst):\n print('Run prefetch...')\n unique_ids_x = set()\n unique_ids_q = set()\n\n all_lst = collections.defaultdict(list)\n for dl in dl_lst:\n n, k, n_sparse_feat = dl.dataset.shape\n all_lst['x'].append(np.concatenate(dl.dataset.X_by_qid, axis=0).reshape(n, k, n_sparse_feat)[:, :, 0])\n all_lst['q'].append(np.concatenate(dl.dataset.q_by_qid, axis=0).reshape(n, k, 1)[:, :, 0])\n all_lst['dl'].append(dl)\n #knns = all_x[:, :, 0]\n #knn_tgts = all_x[:, :, 3]\n #query_ids = all_q[:, :, 0]\n\n #unique_ids_q.update(query_ids)\n #unique_ids_x.update(knns.astype(np.int))\n\n # x\n ids = np.concatenate(all_lst['x'], axis=0).astype(np.int)\n u, inv = np.unique(ids, return_inverse=True)\n fetched, index = self.load_fetch(self.path, self.keys, u)\n\n self.unique_x = u\n self.x_vecs = fetched\n offset = 0\n for x, dl in zip(all_lst['x'], all_lst['dl']):\n for bucket in dl.dataset.X_by_qid:\n size, _ = bucket.shape\n bucket[:, 0] = inv[offset:offset + size]\n offset += size\n\n # q\n ids = np.concatenate(all_lst['q'], axis=0).astype(np.int)\n u, inv = np.unique(ids, return_inverse=True)\n fetched, index = self.load_fetch(self.q_path, self.q_keys, u)\n\n self.unique_q = u\n self.q_vecs = fetched\n offset = 0\n for x, dl in zip(all_lst['x'], all_lst['dl']):\n for bucket in dl.dataset.q_by_qid:\n size = bucket.shape[0]\n bucket[:] = inv[offset:offset + size]\n offset += size\n\n print('done.')\n\n def load_from_memmap(self, idx, feat_type=None):\n u, inv = np.unique(idx.cpu().long().numpy(), return_inverse=True)\n if feat_type == 'x':\n tmp = self.x_vecs[u]\n elif feat_type == 'q':\n tmp = self.q_vecs[u]\n else:\n raise ValueError\n\n tmp = tmp[inv]\n tmp = torch.from_numpy(tmp).view(idx.shape[0], idx.shape[1], self.vec_size)\n return tmp\n\n def load(self, xb, qb):\n x_id, q_src, p, dist, q_tgt, x_tgt = torch.chunk(xb, 6, dim=2)\n xvec = self.load_from_memmap(x_id.long(), feat_type='x')\n qvec = self.load_from_memmap(qb.long(), feat_type='q')\n out = torch.cat([xvec, qvec, xb], -1)\n return out\n\n def _load_in_main_loop(self, xb, qb):\n return self.load(xb, qb)\n\n def _load_in_collate(self, xb, qb):\n return self.load(xb, qb)\n\n\nclass Dictionary(object):\n \"\"\"\n A mapping from symbols to consecutive integers.\n\n Taken from fairseq repo.\n \"\"\"\n\n def __init__(\n self,\n pad=\"<pad>\",\n eos=\"</s>\",\n unk=\"<unk>\",\n bos=\"<s>\",\n extra_special_symbols=None,\n ):\n self.unk_word, self.pad_word, self.eos_word = unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n self.bos_index = self.add_symbol(bos)\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n if extra_special_symbols:\n for s in extra_special_symbols:\n 
self.add_symbol(s)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def __contains__(self, sym):\n return sym in self.indices\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n assert isinstance(sym, str)\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return \"<{}>\".format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[: self.nspecial]\n new_count = self.count[: self.nspecial]\n\n c = collections.Counter(\n dict(\n sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))\n )\n )\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n self.pad_to_multiple_(padding_factor)\n\n def pad_to_multiple_(self, padding_factor):\n \"\"\"Pad Dictionary size to be a multiple of *padding_factor*.\"\"\"\n if padding_factor > 1:\n i = 0\n while len(self) % padding_factor != 0:\n symbol = \"madeupword{:04d}\".format(i)\n self.add_symbol(symbol, n=0)\n i += 1\n\n def bos(self):\n \"\"\"Helper to get index of beginning-of-sentence symbol\"\"\"\n return self.bos_index\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n def add_from_file(self, f):\n \"\"\"\n Loads a pre-existing dictionary from a text file and adds its symbols\n to this instance.\n \"\"\"\n if isinstance(f, str):\n try:\n with open(f, \"r\", encoding=\"utf-8\") as 
fd:\n self.add_from_file(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except UnicodeError:\n raise Exception(\n \"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f)\n )\n return\n\n for line in f.readlines():\n idx = line.rfind(\" \")\n if idx == -1:\n raise ValueError(\n \"Incorrect dictionary format, expected '<token> <cnt>'\"\n )\n word = line[:idx]\n count = int(line[idx + 1 :])\n self.indices[word] = len(self.symbols)\n self.symbols.append(word)\n self.count.append(count)\n\n\ndef run():\n\n args = parse_args()\n\n paths = PathsContainer.from_args(args.job_dir, args.run_id, args.config_file_name)\n\n create_output_dirs(paths.output_dir)\n\n logger = init_logger(paths.output_dir)\n logger.info(f\"created paths container {paths}\")\n\n # read config\n config = Config.from_json(paths.config_path)\n logger.info(\"Config:\\n {}\".format(pformat(vars(config), width=1)))\n\n # reproducibility\n torch.manual_seed(config.seed)\n torch.cuda.manual_seed_all(config.seed)\n np.random.seed(config.seed)\n logger.info(\"Seed: {}\".format(config.seed))\n\n output_config_path = os.path.join(paths.output_dir, \"used_config.json\")\n execute_command(\"cp {} {}\".format(paths.config_path, output_config_path))\n\n # train_ds, val_ds\n train_ds, val_ds = load_libsvm_dataset(\n input_path=config.data.path,\n slate_length=config.data.slate_length,\n validation_ds_role=config.data.validation_ds_role,\n )\n\n n_features = train_ds.shape[-1]\n assert n_features == val_ds.shape[-1], \"Last dimensions of train_ds and val_ds do not match!\"\n\n # load dstore and use as feature func\n dstore = Dstore(**config.dstore)\n n_features = dstore.get_n_features(n_features, config)\n\n # train_dl, val_dl\n train_dl, val_dl = create_data_loaders(\n train_ds, val_ds, num_workers=config.data.num_workers, batch_size=config.data.batch_size,\n dstore=dstore)\n\n if dstore.prefetch:\n dstore.run_prefetch([train_dl, val_dl])\n\n # gpu support\n dev = get_torch_device()\n logger.info(\"Model training will execute on {}\".format(dev.type))\n\n # instantiate model\n model = make_model(n_features=n_features, dstore=dstore, **asdict(config.model, recurse=False))\n if torch.cuda.device_count() > 1:\n model = CustomDataParallel(model)\n logger.info(\"Model training will be distributed to {} GPUs.\".format(torch.cuda.device_count()))\n model.to(dev)\n\n # load optimizer, loss and LR scheduler\n optimizer = getattr(optim, config.optimizer.name)(params=model.parameters(), **config.optimizer.args)\n loss_func = partial(getattr(losses, config.loss.name), **config.loss.args)\n if config.lr_scheduler.name:\n scheduler = getattr(optim.lr_scheduler, config.lr_scheduler.name)(optimizer, **config.lr_scheduler.args)\n else:\n scheduler = None\n\n with torch.autograd.detect_anomaly() if config.detect_anomaly else dummy_context_mgr(): # type: ignore\n # run training\n result = fit(\n model=model,\n feature_func=dstore,\n loss_func=loss_func,\n optimizer=optimizer,\n scheduler=scheduler,\n train_dl=train_dl,\n valid_dl=val_dl,\n config=config,\n device=dev,\n output_dir=paths.output_dir,\n tensorboard_output_path=paths.tensorboard_output_path,\n **asdict(config.training)\n )\n\n dump_experiment_result(args, config, paths.output_dir, result)\n\n if urlparse(args.job_dir).scheme == \"gs\":\n copy_local_to_gs(paths.local_base_output_path, args.job_dir)\n\n assert_expected_metrics(result, config.expected_metrics)\n\n\nif __name__ == \"__main__\":\n run()\n"
] | [
[
"numpy.concatenate",
"torch.cat",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.from_numpy",
"numpy.memmap",
"torch.autograd.detect_anomaly",
"torch.chunk",
"numpy.unique"
]
] |
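The deduplicate-then-gather trick that `Dstore.load_from_memmap` above relies on, isolated on a small random array standing in for the key memmap:

```python
import numpy as np
import torch

keys = np.random.rand(10, 4).astype(np.float32)  # stand-in for dstore_keys.npy
idx = torch.tensor([[3, 3, 7], [7, 1, 3]])       # repeated key ids

# Fetch each unique id once, then expand back to the original order.
u, inv = np.unique(idx.numpy().ravel(), return_inverse=True)
vecs = keys[u][inv]
out = torch.from_numpy(vecs).view(idx.shape[0], idx.shape[1], 4)
print(out.shape)  # torch.Size([2, 3, 4])
```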
kgruchal/bring-me-a-shrubbery | [
"cdfba30ffccb997876b05ae3f260960daad18215"
] | [
"common.py"
] | [
"import math\n\nimport numpy as np\n\n\ndef in_hull(p, hull):\n \"\"\"\n Test if points in `p` are in `hull`\n\n `p` should be a `NxK` coordinates of `N` points in `K` dimensions\n `hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the \n coordinates of `M` points in `K`dimensions for which Delaunay triangulation\n will be computed\n \"\"\"\n from scipy.spatial import Delaunay\n if not isinstance(hull, Delaunay):\n hull = Delaunay(hull)\n\n return hull.find_simplex(p) >= 0\n\n\ndef dist(hull, points):\n # Construct PyGEL Manifold from the convex hull\n m = gel.Manifold()\n for s in hull.simplices:\n m.add_face(hull.points[s])\n\n dist = gel.MeshDistance(m)\n res = []\n for p in points:\n # Get the distance to the point\n # But don't trust its sign, because of possible\n # wrong orientation of mesh face\n d = dist.signed_distance(p)\n\n # Correct the sign with ray inside test\n if dist.ray_inside_test(p):\n if d > 0:\n d *= -1\n else:\n if d < 0:\n d *= -1\n res.append(d)\n return np.array(res)\n\n\ndef get_2d(pcd_numpy, axis_1, axis_2, plot=True):\n slice_1 = pcd_numpy[:, axis_1]\n slice_2 = pcd_numpy[:, axis_2]\n slice_3 = np.zeros((pcd_numpy.shape[0], ))\n\n slice_2d = np.stack((slice_1, slice_2), axis=1)\n\n slice_final = np.stack((slice_1, slice_2, slice_3), axis=1)\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(slice_final)\n if plot:\n o3d.visualization.draw_geometries([pcd])\n\n return slice_2d\n\n\ndef polar2cart(r, theta, phi):\n return [\n r * math.sin(theta) * math.cos(phi),\n r * math.sin(theta) * math.sin(phi),\n r * math.cos(theta)\n ]\n\n\ndef cart2sph(x, y, z):\n hxy = np.hypot(x, y)\n r = np.hypot(hxy, z)\n el = np.arctan2(z, hxy)\n az = np.arctan2(y, x)\n #phi, theta, r\n return [az, el, r]\n\n\ndef get_center(np_array):\n center_x = np.average(np_array[:, 0])\n center_y = np.average(np_array[:, 1])\n center_z = np.average(np_array[:, 2])\n\n return center_x, center_y, center_z\n\n\ndef farthest_point_sampling(points, k):\n print('Performing farthest point sampling....')\n # First, pick a random point to start\n # add it to the solution, remove it from the full list\n solution_set = []\n seed = np.random.randint(0, points.shape[0] - 1)\n solution_set.append(points[seed, :])\n points = np.delete(points, (seed), axis=0)\n\n # Now, iterate k-1 times\n # Find the farthest point, add to solution, remove from list, repeat\n for i in tqdm(range(k-1)):\n distances = np.full((points.shape[0],), np.inf)\n for j, point in enumerate(points):\n for sol_point in solution_set:\n distances[j] = min(distances[j], distance(point, sol_point))\n\n picked_index = np.argmax(distances)\n solution_set.append(points[picked_index])\n points = np.delete(points, (picked_index), axis=0)\n\n return solution_set\n\n\ndef distance(A, B):\n return np.linalg.norm(A-B)\n\n\ndef polar2cart2d(rho, phi):\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return(x, y)\n\n\ndef pdf(algorithm, kwargs):\n probs = algorithm.pdf(**kwargs)\n return probs\n\n\ndef sample(algorithm, kwargs):\n points = algorithm.rvs(**kwargs)\n return points\n\n\ndef mask(points):\n # Illuminate points based on z-value\n min_z = min(points[:, 2])\n max_z = max(points[:, 2])\n # Determine step based on number of views\n\n num_views = 10\n\n step = (max_z - min_z)/num_views\n\n # Need to round to 1 decimal places\n for z in np.arange(min_z, max_z, step):\n # Select the points with matching z-value\n # print(z)\n temp_points = points[np.round(\n points[:, 2], decimals=1) == np.round(z, 
decimals=1)]\n scene_2d = get_2d(temp_points, 0, 1)\n"
] | [
[
"numpy.full",
"numpy.array",
"numpy.delete",
"numpy.linalg.norm",
"numpy.sin",
"numpy.zeros",
"numpy.round",
"numpy.stack",
"numpy.random.randint",
"numpy.arctan2",
"numpy.hypot",
"numpy.arange",
"numpy.average",
"numpy.argmax",
"numpy.cos",
"scipy.spatial.Delaunay"
]
] |
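The record above calls `tqdm`, `o3d` (Open3D) and `gel` (PyGEL) without importing them, so it does not run as-is. Its farthest-point-sampling loop needs only NumPy and works standalone; the 50-point random cloud below is arbitrary.

```python
import numpy as np

def farthest_point_sampling(points, k):
    solution_set = []
    seed = np.random.randint(0, points.shape[0] - 1)
    solution_set.append(points[seed, :])
    points = np.delete(points, seed, axis=0)
    for _ in range(k - 1):
        # Distance from every remaining point to its nearest chosen point.
        dists = np.full(points.shape[0], np.inf)
        for j, p in enumerate(points):
            for s in solution_set:
                dists[j] = min(dists[j], np.linalg.norm(p - s))
        picked = np.argmax(dists)  # the farthest such point is chosen next
        solution_set.append(points[picked])
        points = np.delete(points, picked, axis=0)
    return solution_set

print(len(farthest_point_sampling(np.random.rand(50, 3), 5)))  # 5
```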
theboyslush/mila | [
"274da1d3c7e7e4385fa58eb6aebd24c5ef7fed4d"
] | [
"nlu/classifier.py"
] | [
"from tensorflow.keras.models import load_model\nimport numpy as np\n\nlabels = open('nlu\\entities.txt', 'r', encoding='utf-8').read().split('\\n')\nmodel = load_model('nlu\\model.h5')\n\nlabel2idx = {}\nidx2label = {}\n\nfor k, label in enumerate(labels):\n label2idx[label] = k\n idx2label[k] = label\n\n\n# Classify any given text into a category of our NLU framework\ndef classify(text):\n # Create an input array\n x = np.zeros((1, 48, 256), dtype='float32')\n\n # Fill the x array with data from input text\n for k, ch in enumerate(bytes(text.encode('utf-8'))):\n x[0, k, int(ch)] = 1.0\n\n out = model.predict(x)\n idx = out.argmax()\n\n #print('Text: \"{}\" is classified as \"{}\"'.format(text, idx2label[idx]))\n return idx2label[idx]\n\n'''\nif __name__=='__main__':\n while True:\n text = input('Enter some text:')\n print(classify(text))\n'''"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.zeros"
]
] |
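Just the input encoding from `nlu/classifier.py` above, without loading `model.h5`: one-hot over raw UTF-8 byte values in a fixed 48-byte window. The `[:48]` truncation is an added guard; the original would index out of bounds on longer inputs.

```python
import numpy as np

def encode(text):
    x = np.zeros((1, 48, 256), dtype='float32')
    for k, ch in enumerate(text.encode('utf-8')[:48]):
        x[0, k, int(ch)] = 1.0
    return x

print(encode('turn on the lights').sum())  # one hot bit per byte -> 18.0
```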
deel-ai/xplique | [
"1c493cf290970d05f1430cee04e2cd590d303f9c"
] | [
"xplique/attributions/occlusion.py"
] | [
"\"\"\"\nModule related to Occlusion sensitivity method\n\"\"\"\n\nfrom math import ceil\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .base import BlackBoxExplainer, sanitize_input_output\nfrom ..commons import repeat_labels, batch_tensor\nfrom ..types import Callable, Tuple, Union, Optional\n\n\nclass Occlusion(BlackBoxExplainer):\n \"\"\"\n Used to compute the Occlusion sensitivity method, sweep a patch that occludes pixels over the\n images and use the variations of the model prediction to deduce critical areas.\n\n Ref. Zeiler & al., Visualizing and Understanding Convolutional Networks (2013).\n https://arxiv.org/abs/1311.2901\n\n Parameters\n ----------\n model\n The model from which we want to obtain explanations\n batch_size\n Number of pertubed samples to explain at once.\n Default to 32.\n patch_size\n Size of the patches to apply, if integer then assume an hypercube.\n patch_stride\n Stride between two patches, if integer then assume an hypercube.\n occlusion_value\n Value used as occlusion.\n \"\"\"\n\n def __init__(self,\n model: Callable,\n batch_size: Optional[int] = 32,\n patch_size: Union[int, Tuple[int, int]] = 3,\n patch_stride: Union[int, Tuple[int, int]] = 3,\n occlusion_value: float = 0.0):\n super().__init__(model, batch_size)\n\n self.patch_size = patch_size\n self.patch_stride = patch_stride\n self.occlusion_value = occlusion_value\n\n @sanitize_input_output\n def explain(self,\n inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray],\n targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> tf.Tensor:\n \"\"\"\n Compute Occlusion sensitivity for a batch of samples.\n\n Parameters\n ----------\n inputs\n Dataset, Tensor or Array. Input samples to be explained.\n If Dataset, targets should not be provided (included in Dataset).\n Expected shape among (N, W), (N, T, W), (N, W, H, C).\n More information in the documentation.\n targets\n Tensor or Array. One-hot encoding of the model's output from which an explanation\n is desired. One encoding per input and only one output at a time. 
Therefore,\n the expected shape is (N, output_size).\n More information in the documentation.\n\n Returns\n -------\n explanations\n Occlusion sensitivity, same shape as the inputs, except for the channels.\n \"\"\"\n\n # check if data is images\n is_image = len(inputs.shape) > 2\n\n if is_image:\n if not isinstance(self.patch_size, tuple):\n self.patch_size = (self.patch_size, self.patch_size)\n if not isinstance(self.patch_stride, tuple):\n self.patch_stride = (self.patch_stride, self.patch_stride)\n\n occlusion_maps = None\n batch_size = self.batch_size or len(inputs)\n\n masks = Occlusion._get_masks((*inputs.shape[1:],), self.patch_size, self.patch_stride)\n base_scores = self.batch_inference_function(self.model, inputs, targets, batch_size)\n\n # since the number of masks is often very large, we process the entries one by one\n for single_input, single_target, single_base_score in zip(inputs, targets, base_scores):\n\n occlusion_map = tf.zeros(masks.shape[1:])\n\n for batch_masks in batch_tensor(masks, batch_size):\n\n occluded_inputs = Occlusion._apply_masks(single_input, batch_masks,\n self.occlusion_value)\n repeated_targets = repeat_labels(single_target[tf.newaxis, :], len(batch_masks))\n\n batch_scores = self.inference_function(self.model,\n occluded_inputs,\n repeated_targets)\n\n batch_sensitivity = Occlusion._compute_sensitivity(\n single_base_score, batch_scores, batch_masks\n )\n\n occlusion_map += batch_sensitivity\n\n occlusion_maps = occlusion_map if occlusion_maps is None else \\\n tf.concat([occlusion_maps, occlusion_map], axis=0)\n\n return occlusion_maps\n\n @staticmethod\n def _get_masks(input_shape: Union[Tuple[int, int, int], Tuple[int, int], Tuple[int]],\n patch_size: Union[int, Tuple[int, int]],\n patch_stride: Union[int, Tuple[int, int]]) -> tf.Tensor:\n \"\"\"\n Create all the possible patches for the given configuration.\n\n Parameters\n ----------\n input_shape\n Desired shape, dimension of one sample.\n patch_size\n Size of the patches to apply.\n patch_stride\n Stride between two patches.\n\n Returns\n -------\n occlusion_masks\n The boolean occlusion masks, same shape as the inputs, with 1 as occluded.\n \"\"\"\n masks = []\n\n # check if we have tabular data\n is_tabular = len(input_shape) == 1\n\n if is_tabular:\n x_anchors = [x * patch_stride for x in\n range(0, ceil((input_shape[0] - patch_size + 1) / patch_stride))]\n\n for x_anchor in x_anchors:\n mask = np.zeros(input_shape, dtype=bool)\n mask[x_anchor:x_anchor + patch_size] = 1\n masks.append(mask)\n else:\n x_anchors = [x * patch_stride[0] for x in\n range(0, ceil((input_shape[0] - patch_size[0] + 1) / patch_stride[0]))]\n y_anchors = [y * patch_stride[1] for y in\n range(0, ceil((input_shape[1] - patch_size[1] + 1) / patch_stride[1]))]\n\n for x_anchor in x_anchors:\n for y_anchor in y_anchors:\n mask = np.zeros(input_shape[:2], dtype=bool)\n mask[x_anchor:x_anchor + patch_size[0], y_anchor:y_anchor + patch_size[1]] = 1\n masks.append(mask)\n\n return tf.cast(masks, dtype=tf.bool)\n\n @staticmethod\n @tf.function\n def _apply_masks(current_input: tf.Tensor,\n masks: tf.Tensor,\n occlusion_value: float) -> tf.Tensor:\n \"\"\"\n Given input samples and an occlusion mask template, apply it for every sample.\n\n Parameters\n ----------\n current_input\n Input samples to be explained.\n masks\n The boolean occlusion masks, with 1 as occluded.\n occlusion_value\n Value used as occlusion.\n\n Returns\n -------\n occluded_inputs\n All the occluded combinations for each inputs.\n \"\"\"\n occluded_inputs 
= tf.expand_dims(current_input, axis=0)\n occluded_inputs = tf.repeat(occluded_inputs, repeats=len(masks), axis=0)\n\n # check if current input shape is (W, H, C)\n has_channels = len(current_input.shape) > 2\n if has_channels:\n masks = tf.expand_dims(masks, axis=-1)\n masks = tf.repeat(masks, repeats=current_input.shape[-1], axis=-1)\n\n occluded_inputs = occluded_inputs * tf.cast(tf.logical_not(masks), tf.float32)\n occluded_inputs += tf.cast(masks, tf.float32) * occlusion_value\n\n return occluded_inputs\n\n @staticmethod\n @tf.function\n def _compute_sensitivity(baseline_scores: tf.Tensor,\n occluded_scores: tf.Tensor,\n masks: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Compute the sensitivity score given the score of the occluded inputs\n\n Parameters\n ----------\n baseline_scores\n Scores obtained with the original inputs (not occluded)\n occluded_scores\n Scores of the occluded combinations for the class of\n interest.\n masks\n The boolean occlusion masks, with 1 as occluded.\n\n Returns\n -------\n sensitivity\n Value reflecting the effect of each occlusion patchs on the output\n \"\"\"\n baseline_scores = tf.expand_dims(baseline_scores, axis=-1)\n occluded_scores = tf.reshape(occluded_scores, (-1, len(masks)))\n\n score_delta = baseline_scores - occluded_scores\n # reshape the delta score to fit masks\n score_delta = tf.reshape(score_delta, (*score_delta.shape, *(1,) * len(masks.shape[1:])))\n\n sensitivity = score_delta * tf.cast(masks, tf.float32)\n sensitivity = tf.reduce_sum(sensitivity, axis=1)\n\n return sensitivity\n"
] | [
[
"tensorflow.zeros",
"tensorflow.concat",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.logical_not",
"tensorflow.reduce_sum",
"tensorflow.repeat",
"tensorflow.cast"
]
] |
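The record above implements occlusion sensitivity: slide a patch across the input, replace it with a fixed occlusion value, and credit each patch with the drop in the model's score. A minimal NumPy sketch of the anchor/mask arithmetic used by `_get_masks` (the `patch_size`/`patch_stride` names mirror the record; the `occlusion_masks` helper and the example shapes are hypothetical):

    # Sketch of the occlusion-mask generation, 2D case only, True = occluded.
    from math import ceil
    import numpy as np

    def occlusion_masks(height, width, patch_size, patch_stride):
        # One anchor per stride step whose full patch still starts inside the input.
        x_anchors = [x * patch_stride
                     for x in range(ceil((height - patch_size + 1) / patch_stride))]
        y_anchors = [y * patch_stride
                     for y in range(ceil((width - patch_size + 1) / patch_stride))]
        masks = []
        for xa in x_anchors:
            for ya in y_anchors:
                mask = np.zeros((height, width), dtype=bool)
                mask[xa:xa + patch_size, ya:ya + patch_size] = True
                masks.append(mask)
        return np.stack(masks)

    masks = occlusion_masks(8, 8, patch_size=4, patch_stride=2)
    print(masks.shape)  # (9, 8, 8): 3 x-anchors times 3 y-anchors

The sensitivity map is then the mask-weighted sum of (baseline score minus occluded score), which is what `_compute_sensitivity` computes by broadcasting the score deltas over the mask batch.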
pzwickl/PZ-Thesis | [
"39354e724d11249a3b7e7082b4519b05af5faaa3"
] | [
"VNQ/vod/configuration.py"
] | [
"__author__ = 'patrick'\n\nimport numpy as np\n\npound_to_euro = 1.2618898\ndollar_to_euro = 1\n\ndef dollar(value):\n return value*dollar_to_euro\n\ndef pound(value):\n return value*pound_to_euro\n\ndef million(value):\n return value * 1000 * 1000\n\ndef billion(value):\n return 1000 * million(value)\n\n##############################################################################\n\n# BASE CALCULATIONS\n# Taken from the Globecom paper we can perfectly encode a Blue-ray video in 32768 kBit/s that is 4096 kByte/s\n# Suppose every movie is 90 minutes long we have 90*60*4096, which is 22.1184 Gigabyte\n# Average movie size does not exceed 22.1184 GB\n_movie_size = 22.1184 # in GB\n\n# Let's assume a user watches 2 videos on demand per week = ~ 8 per month (so, if the user only uses it for this purpose, we can divide\n# the monthly costs by 8 (or lower depending on data caps)\n_usages = 8\n\n# MARGINS\n\n# BT profit margin of 11.92 % in March, 2014 http://ycharts.com/companies/BT/profit_margin\n# Comcast profit margin of 10.75 % in March, 2014 http://ycharts.com/companies/CMCSA/profit_margin\n# Due to profitable extra services this may be on the high side\n#######################################################################################\n\n_nsp_margin = (0.1192+0.1075)/2\n_nsp_cost_lv = 1.0 - _nsp_margin\n\n# Wuaki TV: 18.8% profit marin according to http://www.axonpartnersgroup.com/index.php/project/wuaki-tv/\n# Netflix profit margin: 4.18% in March 2014 http://ycharts.com/companies/NFLX/profit_margin\n\n_vod_cost_lv= 1 - 0.188 # content costs plus network transmission costs. Let's find some better estimates.\n_vod_flat_cost_lv= 1 - 0.0418 # content costs plus network transmission costs. Let's find some better estimates.\n\n# Transit constants:\n# 1 Mbps is up to 305.939 GB per month\n# 30 Mbps is up to 9178.17 per month\nmbps_price_to_GB = 1/305.939\n\n_transit_price = dollar(1.57) # Dr. Peering projection for 2013\n_transit_price_per_GB = _transit_price * mbps_price_to_GB\n_transit_movie_price = _transit_price_per_GB*_movie_size\n\n# PLAYER COUNT\n_number_of_tier1_players = 13\n_number_of_CDNS = 23\n_number_of_transit_players = _number_of_tier1_players + _number_of_CDNS\n_number_of_accessUSplayers = 3\n_number_of_accessUKplayers = 11\n_number_of_EC_subscribers = million(1.7) # Q4 2013; according to http://point-topic.com/free-analysis/global-iptv-subscriber-numbers-q4-2013/\n_number_of_vod_players = 12 # includes rental substitutes\n\n\n# DEP. FROM FUNGIBILITY (see spreadsheet)\n_dep_fung_payment = 0 # Money or equivalent\n_dep_fung_accessuk_ec = 0.8 # Network connectivity (Internet access to be precise) provided to the end customer\n_dep_fung_US_access = 0.6 # Network connectivity provider to a content provider (e.g. VoD Platform)\n_dep_fung_UK_access_termination = 0.6 # Termination service by the access NSP in the UK provided to the transit\n_dep_fung_vod_ec = 0.8\n_dep_fung_transit = 0.4\n\n# ENTITY SIZES / MARKET SIZES\n\nUK_VoD_size = pound(million(200)) # TODO: Seems only subscription-based for now. Maybe other data?. 
Commecial data from statista would exist, but access not purchased.\nGlobal_VoD_size = dollar(billion(21.08))\nAccess_US_size = pound(billion(32)) # Ofcom -> POUNDS\nTransit_size = dollar(billion(2.1+3.71)) # globally transit including CDN\nAccess_UK_size = pound(billion(4)) # Ofcom -> POUNDS\n\n# Approach: Use Global size data for the VoD platform (they are global players) and the UK data for the EC market\n\nentity_sizes = [UK_VoD_size, Global_VoD_size, Access_US_size, Transit_size, Access_UK_size]\n\n\n## TRANSIT\n_nsp_margin = (0.1192+0.1075)/2\n_nsp_cost_lv = 1 -_nsp_margin\n\nclassical_transit_prices = [_transit_movie_price] * _number_of_tier1_players # Create a list of Tier-1 Transit prices, i.e., all the same prices here\n\n# CDN Substitutes\nCDN77 = 47.0/1000*_movie_size # CDN 77\nMaxCDN = 50.55/1000*_movie_size # MaxCDN\nAkamai = 350.0/1000*_movie_size # Akamai\nEdgecase = 202.0/1000*_movie_size # Edgecase\nCloudFront = 97.0/1000*_movie_size # CloudFront\nFastly = 97.0/1000*_movie_size # Fastly\nAmazon = 97.0/1000*_movie_size # Amazon\nHibernia = 97.0/1000*_movie_size # Hibernia\nLeaseweb = 37.59625/1000*_movie_size # Leaseweb\n\n# CDN Substitutes with known prices C1\nknown_cdn_prices = [CDN77,MaxCDN,Akamai,Edgecase,CloudFront,Fastly,Amazon,Hibernia,Leaseweb]\ncdn_prices = []\ncdn_prices.extend(known_cdn_prices) # extra list only used for avoiding mistakes later on\n\nremaining_cdns = 14 # Number of CDNs with unknown prices\n#average_cdn_price = 115.3495834 # per TB\n#print(average_cdn_price)\n#print(str(np.average(known_cdn_prices)))\n\n# CDNs where price is not known\nfor i in range(0,remaining_cdns,1):\n cdn_prices.append(np.average(known_cdn_prices))\n #cdn_list_payment.append(dao.RelationshipInstance('accessUStoTransit_AVGCDN', (average_cdn_price + _transit_movie_price), 0, 0, 0.0))\n\n# CDN Substitutes with known prices C2\n#cdn_list = []\n\n\n#for i in range(0,remaining_cdns,1):\n #cdn_list.append(dao.RelationshipInstance('TransitToAccessUS_AVGCDN', (average_cdn_price + _transit_movie_price), (_transit_movie_price + _nsp_cost_lv*average_cdn_price), 0, _dep_fung_transit))\n\n# In total we have considered 23 CDNs\n\n\n## ACCESS US\n\nsum_us_access_price = np.sum([39.99, 70.99, 79.99])\n\nrc_transit = _transit_movie_price * 39.99/sum_us_access_price # own fee. 
39.99 is the access pricing for consumers\ntcn_transit = _transit_movie_price * 70.99/sum_us_access_price # own fee\nverizon_transit = _transit_movie_price * 79.99/sum_us_access_price # own fee\n\nus_access_prices_transitscheme = [rc_transit, tcn_transit, verizon_transit]\n\n## VOD\n\nvod_nowtv_price = pound(3.49)\nvod_lovefilm_price = pound(3.49)\nvod_blinkbox_price = pound(4.49)\nvod_itunes_price = pound(4.49)\nvod_wuaki_price = pound(4.49)\nvod_google_play = pound(4.49)\nvod_unlimited_tv = pound(4.49)\n# 3 instances where price tag has not been found -> use average of 4.09 pounds\nvod_average = pound(4.20) # Refers to virgincable, btvision, filmflex\nvod_netflix_price = pound(6.99)/_usages\nvod_lovefilm_alt_price = pound(5.99)/_usages\n\nvod_prices_classic = [vod_nowtv_price, vod_lovefilm_price, vod_blinkbox_price, vod_itunes_price, vod_wuaki_price, vod_unlimited_tv, vod_google_play, vod_average, vod_average , vod_average]\nvod_prices_flat = [vod_netflix_price, vod_lovefilm_alt_price]\nvod_prices = []\nvod_prices.extend(vod_prices_classic)\nvod_prices.extend(vod_prices_flat)\n\n# ACCESS UK\n\n#Basic E Values\nvirgin_cable = pound(26.50/_usages)\nsky = pound(35.0/_usages)\nplusnet = pound(31.0/_usages)\ntalktalktalk = pound(22.7/_usages)\nee = pound(37.7/_usages)\ndirectsavetelecom = pound(29.95/_usages)\njohnlewis = pound(38.50/_usages)\neclipse = pound(39.95/_usages)\nzen = pound(34.94/_usages)\n# With data cap\nBT = pound(31.0/1) #BT with 20 GB (normal competitor = one movie) ... floor(20/movie_size) would be 0, but it should work in practice.\nee_alt = pound(30.0/1) #EE with 25 GB (Substitute) = one movie\n\nuk_access_prices = [virgin_cable, sky, plusnet, talktalktalk, ee, directsavetelecom, johnlewis, eclipse, zen, BT, ee_alt]\n\n"
] | [
[
"numpy.average",
"numpy.sum"
]
] |
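The configuration above prices one movie's worth of transit by chaining unit conversions: price per Mbps-month, to price per GB, to price per movie. A worked check of that arithmetic (constants copied from the record; the variable names here are illustrative only):

    # Transit cost per delivered movie, following the record's constants.
    movie_size_gb = 22.1184             # 90 min at 4096 kByte/s
    gb_per_mbps_month = 305.939         # 1 Mbps sustained for one month
    transit_price_mbps = 1.57           # USD per Mbps (dollar_to_euro = 1 in the record)

    price_per_gb = transit_price_mbps / gb_per_mbps_month
    price_per_movie = price_per_gb * movie_size_gb
    print(round(price_per_movie, 4))    # ~0.1135, matching _transit_movie_price

CDN prices follow the same pattern: a quoted per-TB price is divided by 1000 to get a per-GB price, then multiplied by the 22.1184 GB movie size.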
Lauffenburger-Lab/Artificial-Signaling-Network | [
"707e79c7e2ad341d68a719443b9e17fe9e7bb7c1"
] | [
"Model/ligandScreenCrossValidationResults.py"
] | [
"import torch\nimport numpy\nimport matplotlib.pyplot as plt\nimport bionetwork\nimport plotting\nimport pandas\nfrom scipy.stats import pearsonr\nimport seaborn as sns\nfrom scipy.stats import mannwhitneyu\nfrom sklearn.linear_model import LinearRegression\n\n#Load network\nnetworkList, nodeNames, modeOfAction = bionetwork.loadNetwork('data/ligandScreen-Model.tsv')\nannotation = pandas.read_csv('data/ligandScreen-Annotation.tsv', sep='\\t')\nuniprot2gene = dict(zip(annotation['code'], annotation['name']))\nligandInput = pandas.read_csv('data/ligandScreen-Ligands.tsv', sep='\\t', low_memory=False, index_col=0)\nTFOutput = pandas.read_csv('data/ligandScreen-TFs.tsv', sep='\\t', low_memory=False, index_col=0)\n\nCVconditions = pandas.read_csv('CVligandScreen/conditions.tsv', sep='\\t')\ncriterion = torch.nn.MSELoss(reduction='mean')\n\n\n#Subset input and output to intersecting nodes\ninName = ligandInput.columns.values\noutName = TFOutput.columns.values\ninName = numpy.intersect1d(nodeNames, inName)\noutName = numpy.intersect1d(nodeNames, outName)\ninNameGene = [uniprot2gene[x] for x in inName]\noutNameGene = [uniprot2gene[x] for x in outName]\nligandInput = ligandInput.loc[:,inName]\nTFOutput = TFOutput.loc[:,outName]\n\nsampleName = ligandInput.index.values\nX = torch.tensor(ligandInput.values.copy(), dtype=torch.double)\nY = torch.tensor(TFOutput.values, dtype=torch.double)\n\nfolder = 'figures/Figure 6/'\n#%%\ntestedConditions = CVconditions['Condition'].values\ndictionary = dict(zip(sampleName, list(range(len(sampleName)))))\nconditionOrder = numpy.array([dictionary[x] for x in testedConditions])\nfolds = CVconditions.Index.values\nfoldNames = numpy.unique(folds)\nNfolds = len(foldNames)\nselectedSamples = sampleName[conditionOrder]\n\n\nreferenceX = X[conditionOrder,:]\nreferenceY = Y[conditionOrder,:]\npredictionY = torch.zeros(referenceY.shape).double()\n\nresultsY = torch.zeros(Nfolds, referenceY.shape[0], referenceY.shape[1])\n\nsampleCorrelations = numpy.zeros((Nfolds, referenceY.shape[0]))\ntfCorrelations = numpy.zeros((Nfolds, referenceY.shape[1]))\ntfCorrelationPredict = numpy.zeros((Nfolds, referenceY.shape[1]))\n\nsamplePrediction = numpy.zeros(referenceY.shape[0])\n\n\n# trainFit = numpy.zeros(len(conditionOrder))\n# testFit = numpy.zeros(len(conditionOrder))\n\nfor i in range(Nfolds):\n curModel = torch.load('CVligandScreen/MML_model_' + str(i) + '.pt')\n Yhat, YhatFull = curModel(referenceX)\n resultsY[i,:,:] = Yhat\n predictionMap= folds==i\n trainMap = numpy.logical_not(predictionMap)\n predictionY[predictionMap, :] = Yhat[predictionMap, :]\n\n # trainFit[i] = criterion(Yhat[trainMap,:], referenceY[trainMap,:]).item()\n # testFit[i] = criterion(Yhat[predictionMap,:], referenceY[predictionMap,:]).item() #might need reshaping\n\n for j in range(len(conditionOrder)):\n r, p = pearsonr(Yhat[j, :].detach().numpy(), referenceY[j, :].numpy())\n if numpy.isnan(r):\n r = 0\n if predictionMap[j]:\n sampleCorrelations[i, j] = numpy.NaN\n else:\n sampleCorrelations[i, j] = r\n\n for j in range(Y.shape[1]):\n A = Yhat[:, j].detach().numpy()\n B = referenceY[:, j].numpy()\n A = numpy.delete(A, predictionMap)\n B = numpy.delete(B, predictionMap)\n r, p = pearsonr(A, B)\n if numpy.isnan(r):\n r = 0\n tfCorrelations[i, j] = r\n\n # for j in range(Y.shape[1]):\n # A = Yhat[:, j].detach().numpy()\n # B = referenceY[:, j].numpy()\n # A = numpy.delete(A, numpy.logical_not(predictionMap))\n # B = numpy.delete(B, numpy.logical_not(predictionMap))\n # r, p = pearsonr(A, B)\n # if 
numpy.isnan(r):\n # r = 0\n # tfCorrelationPredict[i, j] = r\n\n\n\nsamplePrediction = plotting.calculateCorrelations(referenceY.T, predictionY.T)\npredictionCorrelations = plotting.calculateCorrelations(referenceY, predictionY)\n\n\n\nscrambledY = torch.zeros(referenceY.shape, dtype=referenceY.dtype)\n\nfor i in range(Nfolds):\n curModel = torch.load('CVligandScreen/MML_sramble_model_' + str(i) + '.pt')\n Yhat, YhatFull = curModel(referenceX)\n predictionMap= folds==i\n trainMap = numpy.logical_not(predictionMap)\n scrambledY[predictionMap, :] = Yhat[predictionMap, :]\n\n\nsamplePredictionScrambled = plotting.calculateCorrelations(referenceY.T, scrambledY.T)\npredictionCorrelationsScrambled = plotting.calculateCorrelations(referenceY, scrambledY)\n\n\nreluY = torch.zeros(referenceY.shape, dtype=referenceY.dtype)\n\nfor i in range(Nfolds):\n curModel = torch.load('CVligandScreen/leakyRelu_model_' + str(i) + '.pt')\n Yhat, YhatFull = curModel(referenceX)\n predictionMap= folds==i\n trainMap = numpy.logical_not(predictionMap)\n reluY[predictionMap, :] = Yhat[predictionMap, :]\n\nsamplePredictionRelu = plotting.calculateCorrelations(referenceY.T, reluY.T)\npredictionCorrelationsRelu = plotting.calculateCorrelations(referenceY, reluY)\n\n\n#curModel = torch.load('CVmacrophage/model_scramble.pt')\n#scrambledY, YhatFull = curModel(referenceX)\n#scrambledCorrelation = plotting.calculateCorrelations(referenceY, scrambledY)\n#scrambledConditions = plotting.calculateCorrelations(referenceY.T, scrambledY.T)\n\n\n#%%\nplt.rcParams[\"figure.figsize\"] = (3,3)\n\nplt.figure()\nA = predictionY.detach().numpy()\nB = referenceY.detach().numpy()\n# A = A[:, failedTfs==False]\n# B = B[:, failedTfs==False]\ndf = pandas.DataFrame((A.flatten(),B.flatten()), index=['Prediction', 'Data']).T\nplt.scatter(df['Prediction'], df['Data'], alpha=0.02)\nr, p = pearsonr(A.flatten(), B.flatten())\n\n# A = predictionY[:, failedTfs].detach().numpy()\n# B = referenceY[:, failedTfs].detach().numpy()\n# plt.scatter(A, B, alpha=0.1, color='red')\nplt.xlabel('Prediction')\nplt.ylabel('Data')\nplt.gca().axis('equal')\nplt.gca().set_xticks([0, 0.5, 1])\nplt.gca().set_yticks([0, 0.5, 1])\nplotting.lineOfIdentity()\nplt.text(0, 0.9, 'r {:.2f}\\np {:.2e}'.format(r, p))\nplt.savefig(folder + 'C.svg')\ndf.to_csv(folder + 'C.tsv', sep='\\t')\n\n# axisScale = 40\n# counts, rangeX, rangeY = numpy.histogram2d(A.flatten(), B.flatten(), bins=axisScale, range=[[0, 1], [0, 1]])\n# counts_transformed = numpy.log10(counts+1)\n# ax = sns.heatmap(counts_transformed.T, mask=counts_transformed==0, vmin=0, cmap=\"Blues\", cbar_kws={'label': 'log10(#predictions + 1)'})\n# ax.invert_yaxis()\n# for _, spine in ax.spines.items():\n# spine.set_visible(True)\n# #sns.histplot(df, x=\"Model\", y=\"Reference\", bins=100, cbar=True, cbar_kws={'label': 'number of predictions'}, vmax=50)\n# ax.axis('equal')\n# plt.xlabel('fit.')\n# plt.ylabel('ref.')\n# plt.gca().set_xticks(numpy.linspace(0, axisScale, 5))\n# plt.gca().set_yticks(numpy.linspace(0, axisScale, 5))\n# plt.gca().set_xlim([0, axisScale])\n# plt.gca().set_ylim([0, axisScale])\n# plt.gca().set_xticklabels(numpy.linspace(0, 1, 5), rotation = 0)\n# plt.gca().set_yticklabels(numpy.linspace(0, 1, 5))\n# plt.gca().set_xlabel('Model')\n# plt.gca().set_ylabel('Reference')\n# r, p = pearsonr(A.flatten(), B.flatten())\n# plt.text(0, axisScale *0.9, 'r {:.2f}'.format(r))\n# plotting.lineOfIdentity()\n\n# plt.figure()\n# plt.scatter(trainFit, testFit)\n# plt.xlim(left=0)\n# 
plt.ylim(bottom=0)\n\n#sampleCorrelations =\n\nplt.figure()\n\ntrainCorrelations = numpy.mean(tfCorrelations, axis=0)\n\ndf = pandas.DataFrame((trainCorrelations, predictionCorrelations), columns=outNameGene, index=['Train', 'Test']).T\n\nplt.scatter(trainCorrelations, predictionCorrelations, alpha=0.5)\n#plt.scatter(scrambledCorrelation, trainCorrelations)\n\nfailedTFCutof = 0.3\nfailedTfs = predictionCorrelations<failedTFCutof\n\nfor i in range(len(trainCorrelations)):\n if predictionCorrelations[i]< failedTFCutof:\n plt.text(trainCorrelations[i], predictionCorrelations[i], outNameGene[i])\nplt.xlabel('Train')\nplt.ylabel('Test')\nplt.xlim(right=1)\n#plotting.lineOfIdentity()\nplt.axis('equal')\nplt.xlim([0, 1])\nplt.ylim([0, 1])\nr, p = pearsonr(trainCorrelations, predictionCorrelations)\nplt.text(0.7, 0.8, 'r={:.1f}'.format(r))\nreg = LinearRegression().fit(predictionCorrelations.reshape(-1, 1), trainCorrelations.reshape(-1, 1))\nY = numpy.array([0, 1])\nX = reg.predict(Y.reshape(-1, 1))\nplt.plot(X, Y, color = 'tab:orange')\nplt.savefig(folder + 'D.svg')\ndf.to_csv(folder + 'D.tsv', sep='\\t')\n\n# plt.figure()\n# plt.scatter(scrambledCorrelation, predictionCorrelations)\n# plt.xlabel('Scrambled Y')\n# plt.ylabel('Cross validation')\n\n\nplt.rcParams[\"figure.figsize\"] = (4, 15)\n# plt.figure()\n# df = pandas.DataFrame(tfCorrelations, columns=outNameGene, index=testedConditions)\n# sns.boxplot(data=df, orient=\"h\", color='grey')\n# Ylocation = numpy.array(range(len(predictionCorrelations)))\n# plt.scatter(predictionCorrelations, Ylocation)\n# plt.scatter(scrambledCorrelation, Ylocation)\nplt.figure()\nsampleOrder = numpy.flip(numpy.argsort(predictionCorrelations))\ndf = pandas.DataFrame(tfCorrelations, columns=outNameGene.copy(), index=foldNames)\ndf = df.iloc[:,sampleOrder]\ndf = pandas.melt(df)\ndf = df.dropna()\nsns.boxplot(x='value', y='variable', data=df, color='grey')\n\n# df = pandas.DataFrame(tfCorrelationPredict, columns=outNameGene.copy(), index=foldNames)\n# df = df.iloc[:,sampleOrder]\n# df = pandas.melt(df)\n# df = df.dropna()\n# sns.boxplot(x='value', y='variable', data=df, color='blue')\n\nYlocation = numpy.array(range(len(predictionCorrelations)))\nplt.scatter(predictionCorrelations[sampleOrder], Ylocation)\n#plt.scatter(predictionCorrelationsScrambled[sampleOrder], Ylocation, alpha=0.2)\n# plt.scatter(scrambledConditions, Ylocation)\n# plt.plot([0, 0], [0, len(samplePrediction)], 'k')\n# U1, p = mannwhitneyu(samplePrediction, scrambledConditions)\n# plt.text(-0.3, 1, 'p {:.2e}'.format(p))\nplt.ylabel('TF')\nplt.xlabel('Correlation')\nplt.xlim(right=1)\nplt.xlim([0, 1])\nplt.savefig(\"figures/ligand screen CV/correlationsTF.svg\") \n\n\nplt.rcParams[\"figure.figsize\"] = (4, 18)\nplt.figure()\nsampleOrder = numpy.flip(numpy.argsort(samplePrediction))\ndf = pandas.DataFrame(sampleCorrelations, columns=testedConditions.copy(), index=foldNames)\ndf = df.iloc[:,sampleOrder]\ndf = pandas.melt(df)\ndf = df.dropna()\nsns.boxplot(x='value', y='variable', data=df, color='grey')\nYlocation = numpy.array(range(len(samplePrediction)))\nplt.scatter(samplePrediction[sampleOrder], Ylocation)\nplt.xlim([-1, 1])\nplt.savefig(\"figures/ligand screen CV/correlationsConditions.svg\") \n#plt.scatter(samplePredictionScrambled[sampleOrder], Ylocation, alpha=0.2)\n\n\n#colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n#Ylocation = [0, len(samplePrediction)]\n\n\n\n\n\n#plt.scatter(scrambledConditions, Ylocation)\n#plt.plot([0, 0], [0, len(samplePrediction)], 'k')\n#U1, p = 
mannwhitneyu(samplePrediction, scrambledConditions)\n#plt.text(-0.3, 1, 'p {:.2e}'.format(p))\nplt.ylabel('Condition')\nplt.xlabel('Correlation')\nplt.xlim(right=1)\n\nplt.rcParams[\"figure.figsize\"] = (5, 5)\nplt.figure()\nresults = numpy.zeros((Nfolds, 4))\nfor i in range(Nfolds):\n predictionMap= folds==i\n trainMap = numpy.logical_not(predictionMap)\n \n #Train\n r, p = pearsonr(resultsY[i, trainMap,:].detach().numpy().flatten(), referenceY[trainMap,:].numpy().flatten())\n results[i, 0] = r\n \n #Test\n r, p = pearsonr(resultsY[i, predictionMap,:].detach().numpy().flatten(), referenceY[predictionMap,:].numpy().flatten())\n results[i, 1] = r \n \n #Scrambled\n r, p = pearsonr(scrambledY[predictionMap,:].detach().numpy().flatten(), referenceY[predictionMap,:].numpy().flatten())\n results[i, 2] = r \n\n #leaky relu\n r, p = pearsonr(reluY[predictionMap,:].detach().numpy().flatten(), referenceY[predictionMap,:].numpy().flatten())\n results[i, 3] = r \n \n \n\ndf = pandas.DataFrame(results, columns = ['Train', 'Test', 'Scramble', 'Relu'])\nsns.boxplot(data=df)\nplt.ylabel('correlation')\nplt.plot([-0.5, 3.5], [0, 0], color='k')\nplt.ylim([-1, 1])\n\n#r = numpy.mean(samplePrediction)\n#plt.plot([r, r], Ylocation, color=colors[0], alpha=0.5)\n#r = numpy.mean(samplePredictionScrabled)\n#plt.plot([r, r], Ylocation, color=colors[1], alpha=0.5)\n\nU1, p = mannwhitneyu(results[:,1], results[:,2])\nplt.text(1, -0.5, 'p={:.2e}'.format(p))\n\nU1, p = mannwhitneyu(results[:,1], results[:,3])\nprint('Compared to ReLU', p)\n\nmeanR = numpy.mean(results, axis=0)\nstdR = numpy.std(results, axis=0)\nfor i in range(len(meanR)):\n plt.text(i-0.3, -0.85, '{:.2f}±{:.2f}'.format(meanR[i], stdR[i]))\nplt.savefig(\"figures/ligand screen CV/compareCV.svg\") \n\n# plt.rcParams[\"figure.figsize\"] = (5, 5)\n# plt.figure()\n# meanCorrelationTrain = numpy.nanmean(sampleCorrelations, axis=0)\n# plt.scatter(meanCorrelationTrain, samplePrediction)\n\n# failedConditionCutof = 0.5\n\n# for i in range(len(meanCorrelationTrain)):\n# if samplePrediction[i]< failedConditionCutof:\n# plt.text(meanCorrelationTrain[i], samplePrediction[i], testedConditions[i])\n# plt.xlabel('Train')\n# plt.ylabel('Test')\n# plt.xlim(right=1)\n# plotting.lineOfIdentity()\n\n\n\n#plt.legend(['CV', 'Scrambled Y'])\n\n# plt.rcParams[\"figure.figsize\"] = (10,10)\n# plt.figure()\n# rank = plotting.compareAllTFs(Yhat, Y, outNameGene)\n\n# plt.figure()\n# rank = plotting.compareAllTFs(Yhat.T, Y.T, sampleName)\n\n\n# plotting.compareDataAndModel(X.detach(), Y.detach(), Yhat.detach(), sampleName, outNameGene)\n\n# plt.figure()\n# pca = PCA(n_components=4)\n# principalComponents = pca.fit_transform(referenceY.detach().numpy())\n# sc = plt.scatter(principalComponents[:,1], principalComponents[:,2])\n# #plt.colorbar(sc)\n# for i in range(len(testedConditions)):\n# plt.text(principalComponents[i,1], principalComponents[i,2], testedConditions[i])\n# plt.xlabel(pca.explained_variance_ratio_[1])\n# plt.ylabel(pca.explained_variance_ratio_[2])\n\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.xlim",
"sklearn.linear_model.LinearRegression",
"numpy.mean",
"scipy.stats.pearsonr",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"scipy.stats.mannwhitneyu",
"torch.tensor",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis",
"torch.zeros",
"numpy.array",
"numpy.delete",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.std",
"numpy.intersect1d",
"numpy.argsort",
"pandas.melt",
"numpy.logical_not",
"numpy.isnan",
"torch.nn.MSELoss",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.unique"
]
] |
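The script above follows a standard leave-fold-out pattern: each fold's model predicts all conditions, but only its held-out rows are copied into the shared matrix, so `predictionY` (and likewise `scrambledY` and `reluY`) contains purely out-of-fold predictions. A compact sketch of that stitching step (NumPy; `models`, `X`, and `folds` are hypothetical stand-ins for the loaded per-fold models and data):

    import numpy as np

    def out_of_fold_predictions(models, X, folds):
        # models[i] was trained with fold i held out; folds[j] is sample j's fold id.
        predictions = np.zeros((len(X), models[0](X).shape[1]))
        for i, model in enumerate(models):
            held_out = folds == i            # boolean mask of this fold's test rows
            predictions[held_out, :] = model(X)[held_out, :]
        return predictions

This keeps train and test performance cleanly separated, which is what the Train/Test/Scramble/Relu box plot at the end of the script compares.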