version (string, 24 classes) | code (string, 396–135k chars) | apis (sequence) | full_version (string, 1–6 chars) | repo_name (string, 6–64 chars) | hexsha (string, 40 chars) |
---|---|---|---|---|---|
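Each row that follows pairs one source file (code) with the torch APIs it calls and its provenance (version bucket, exact release, repository, and commit SHA). Below is a minimal sketch of how such rows could be represented and filtered once parsed into Python objects; the field names simply mirror the columns above, and nothing here comes from an actual loader:

import dataclasses
from typing import List

@dataclasses.dataclass
class Row:
    version: str         # coarse bucket, e.g. "1.7"
    code: str            # full source file contents
    apis: List[str]      # torch API calls found in `code`
    full_version: str    # exact release, e.g. "1.7.0"
    repo_name: str       # "<owner>/<repo>" on GitHub
    hexsha: str          # 40-character commit SHA the file was taken from

def rows_for_version(rows: List[Row], version: str) -> List[Row]:
    # Keep only the rows whose coarse version bucket matches.
    return [r for r in rows if r.version == version]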
1.7 | import torch
from .casia_dataset import CasiaDataset
class DebugDataset(CasiaDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._debug_image = torch.rand(3, 120, 120)
self._debug_label = torch.tensor(1)
def __getitem__(self, idx):
return {
"data": {str(i):self._debug_image for i in range(len(self.scale_size)+1)},
"label": self._debug_label,
'image_path': "fuck"
}
def __len__(self):
return 200 | [
"torch.rand",
"torch.tensor"
] | 1.7.0 | gyfastas/CS7319E1G16 | 03126af04766abcb269d0c8db481c96c856d21ef |
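The DebugDataset above stubs out CasiaDataset with fixed tensors so a training loop can be exercised without real data. Here is a standalone sketch of the same pattern, wrapped in a DataLoader; CasiaDataset and its scale_size attribute are assumptions inherited from the parent class, so the sketch replaces them with a plain count:

import torch
from torch.utils.data import DataLoader, Dataset

class TinyDebugDataset(Dataset):
    """Stand-in for the DebugDataset above: fixed tensors, fixed length."""
    def __init__(self, num_scales: int = 2):
        self._image = torch.rand(3, 120, 120)
        self._label = torch.tensor(1)
        self._num_scales = num_scales

    def __getitem__(self, idx):
        # Same tensor layout as DebugDataset.__getitem__ (the dummy path string is omitted).
        return {
            "data": {str(i): self._image for i in range(self._num_scales + 1)},
            "label": self._label,
        }

    def __len__(self):
        return 200

loader = DataLoader(TinyDebugDataset(), batch_size=4)
batch = next(iter(loader))   # batch["data"]["0"]: (4, 3, 120, 120), batch["label"]: (4,)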
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import torch
from pytorch_lightning import callbacks, seed_everything, Trainer
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_mc_called(tmpdir):
seed_everything(1234)
# -----------------
# TRAIN LOOP ONLY
# -----------------
train_step_only_model = BoringModel()
train_step_only_model.validation_step = None
# no callback
trainer = Trainer(max_epochs=3, checkpoint_callback=False)
trainer.fit(train_step_only_model)
assert len(trainer.dev_debugger.checkpoint_callback_history) == 0
# -----------------
# TRAIN + VAL LOOP ONLY
# -----------------
val_train_model = BoringModel()
# no callback
trainer = Trainer(max_epochs=3, checkpoint_callback=False)
trainer.fit(val_train_model)
assert len(trainer.dev_debugger.checkpoint_callback_history) == 0
@mock.patch('torch.save')
@pytest.mark.parametrize(
['epochs', 'val_check_interval', 'expected'],
[(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 7)],
)
def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int):
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
# make sure checkpoints were saved the expected number of times
assert save_mock.call_count == expected
@mock.patch('torch.save')
@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [
(1, 1, 1.0, 1),
(2, 2, 1.0, 2),
(2, 1, 0.25, 4),
(2, 2, 0.3, 7),
])
def test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.last_coeff = 10.0
def training_step(self, batch, batch_idx):
loss = self.step(torch.ones(32))
loss = loss / (loss + 0.0000001)
loss += self.last_coeff
self.log('my_loss', loss)
self.last_coeff *= 0.999
return loss
model = TestModel()
trainer = Trainer(
callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss', save_top_k=k)],
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval
)
trainer.fit(model)
# make sure checkpoints were saved the expected number of times
assert save_mock.call_count == expected
@mock.patch('torch.save')
@RunIf(special=True, min_gpus=2)
@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [(1, 1, 1.0, 1), (2, 2, 0.3, 5)])
def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
local_rank = int(os.getenv("LOCAL_RANK"))
self.log('my_loss', batch_idx * (1 + local_rank), on_epoch=True)
return super().training_step(batch, batch_idx)
def training_epoch_end(self, outputs) -> None:
data = str(self.global_rank)
obj = [[data], (data, ), set(data)]
out = self.trainer.training_type_plugin.broadcast(obj)
assert obj == [[str(self.global_rank)], (str(self.global_rank), ), set(str(self.global_rank))]
assert out == [['0'], ('0', ), set('0')]
model = TestModel()
trainer = Trainer(
callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss_step', save_top_k=k, mode="max")],
default_root_dir=tmpdir,
max_epochs=epochs,
weights_summary=None,
val_check_interval=val_check_interval,
accelerator="ddp",
gpus=2,
limit_train_batches=64,
limit_val_batches=32,
)
if os.getenv("LOCAL_RANK") == "0":
with pytest.raises(UserWarning, match="The value associated to the key my_loss_epoch: [15.5, 31.0]"):
trainer.fit(model)
assert save_mock.call_count == expected
else:
trainer.fit(model)
| [
"torch.ones"
] | 1.4 | alanhdu/pytorch-lightning | b7a22ba046ba57072a71b12d16caff000e66f798 |
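The tests above mock torch.save and count its calls to verify how often checkpoints are written for a given max_epochs / val_check_interval / save_top_k combination. A minimal sketch of the callback configuration being exercised follows; the directory and metric name are illustrative, and the monitored key must match whatever the LightningModule logs:

from pytorch_lightning import Trainer, callbacks

checkpoint_cb = callbacks.ModelCheckpoint(
    dirpath="checkpoints/",   # illustrative output directory
    monitor="my_loss",        # must match a key passed to self.log(...) in training_step
    save_top_k=2,             # keep only the two best checkpoints by the monitored value
)
trainer = Trainer(max_epochs=2, val_check_interval=0.25, callbacks=[checkpoint_cb])
# trainer.fit(model)  # with a LightningModule that logs "my_loss"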
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import Optional
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.gpu import GPUAccelerator
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.plugins import (
DDP2Plugin,
DDPPlugin,
DDPShardedPlugin,
DDPSpawnPlugin,
DDPSpawnShardedPlugin,
DeepSpeedPlugin,
ParallelPlugin,
PrecisionPlugin,
SingleDevicePlugin,
)
from pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
def test_accelerator_choice_cpu(tmpdir):
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
)
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, SingleDevicePlugin)
def test_accelerator_choice_ddp_cpu(tmpdir):
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp_cpu',
)
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch('torch.cuda.device_count', return_value=2)
@mock.patch('torch.cuda.is_available', return_value=True)
def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp',
gpus=1,
)
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch('torch.cuda.device_count', return_value=2)
@mock.patch('torch.cuda.is_available', return_value=True)
def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp_spawn',
gpus=1,
)
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)
@RunIf(min_gpus=2)
@mock.patch.dict(
os.environ, {
"CUDA_VISIBLE_DEVICES": "0,1",
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"SLURM_LOCALID": "10"
}
)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp_slurm(setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp
assert trainer.accelerator_connector.is_slurm_managing_tasks
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
assert trainer.training_type_plugin.task_idx == 10
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp',
gpus=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(min_gpus=1)
@mock.patch.dict(
os.environ, {
"CUDA_VISIBLE_DEVICES": "0,1",
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "10"
}
)
@mock.patch('torch.cuda.device_count', return_value=2)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp2_slurm(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp2
assert trainer.accelerator_connector.is_slurm_managing_tasks
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDP2Plugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
assert trainer.training_type_plugin.task_idx == 10
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp2',
gpus=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(min_gpus=1)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1", "WORLD_SIZE": "2", "LOCAL_RANK": "10", "NODE_RANK": "0"})
@mock.patch('torch.cuda.device_count', return_value=2)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp_te(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
assert trainer.training_type_plugin.task_idx == 10
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp',
gpus=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(min_gpus=1)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1", "WORLD_SIZE": "2", "LOCAL_RANK": "10", "NODE_RANK": "0"})
@mock.patch('torch.cuda.device_count', return_value=2)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp2_te(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp2
assert isinstance(trainer.accelerator, GPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDP2Plugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
assert trainer.training_type_plugin.task_idx == 10
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp2',
gpus=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@mock.patch.dict(os.environ, {
"WORLD_SIZE": "1",
"LOCAL_RANK": "10",
"NODE_RANK": "0",
})
@mock.patch('torch.cuda.device_count', return_value=0)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp_cpu_te(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
assert trainer.training_type_plugin.task_idx == 10
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@mock.patch.dict(
os.environ, {
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0"
}
)
@mock.patch('torch.cuda.device_count', return_value=0)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp
assert trainer.accelerator_connector.is_slurm_managing_tasks
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.task_idx == 0
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@mock.patch.dict(
os.environ, {
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0"
}
)
@mock.patch('torch.cuda.device_count', return_value=0)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_accelerator_choice_ddp_cpu_custom_cluster(device_count_mock, setup_distributed_mock):
"""
Test that we choose the custom cluster even when SLURM or TE flags are around
"""
class CustomCluster(LightningEnvironment):
def master_address(self):
return 'asdf'
def creates_children(self) -> bool:
return True
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert trainer.use_ddp
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert isinstance(trainer.training_type_plugin.cluster_environment, CustomCluster)
raise SystemExit()
model = BoringModel()
trainer = Trainer(
plugins=[CustomCluster()],
fast_dev_run=True,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@mock.patch.dict(
os.environ, {
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0"
}
)
@mock.patch('torch.cuda.device_count', return_value=0)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_custom_accelerator(device_count_mock, setup_distributed_mock):
class Accel(Accelerator):
pass
class Prec(PrecisionPlugin):
pass
class TrainTypePlugin(SingleDevicePlugin):
pass
accelerator = Accel(
training_type_plugin=TrainTypePlugin(device=torch.device("cpu")),
precision_plugin=Prec(),
)
trainer = Trainer(
accelerator=accelerator,
fast_dev_run=True,
num_processes=2,
)
assert isinstance(trainer.accelerator, Accel)
assert isinstance(trainer.training_type_plugin, TrainTypePlugin)
assert isinstance(trainer.precision_plugin, Prec)
@mock.patch.dict(
os.environ, {
"SLURM_NTASKS": "2",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0"
}
)
@mock.patch('torch.cuda.device_count', return_value=0)
@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)
def test_dist_backend_accelerator_mapping(device_count_mock, setup_distributed_mock):
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert isinstance(trainer.accelerator, CPUAccelerator)
assert isinstance(trainer.training_type_plugin, DDPPlugin)
assert trainer.training_type_plugin.task_idx == 0
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@mock.patch("pytorch_lightning.utilities._IS_INTERACTIVE", return_value=True)
@mock.patch('torch.cuda.device_count', return_value=2)
def test_ipython_incompatible_backend_error(*_):
with pytest.raises(MisconfigurationException, match="backend ddp is not compatible"):
Trainer(accelerator="ddp", gpus=2)
with pytest.raises(MisconfigurationException, match="backend ddp is not compatible"):
Trainer(accelerator="ddp_cpu", num_processes=2)
with pytest.raises(MisconfigurationException, match="backend ddp2 is not compatible"):
Trainer(accelerator="ddp2", gpus=2)
@pytest.mark.parametrize(
["accelerator", "plugin"],
[('ddp_spawn', 'ddp_sharded'), (None, 'ddp_sharded')],
)
def test_plugin_accelerator_choice(accelerator: Optional[str], plugin: str):
"""Ensure that when a plugin and accelerator is passed in, that the plugin takes precedent."""
trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)
trainer = Trainer(plugins=plugin, num_processes=2)
assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)
@pytest.mark.parametrize(["accelerator", "plugin"], [
('ddp', DDPPlugin),
('ddp_spawn', DDPSpawnPlugin),
('ddp_sharded', DDPShardedPlugin),
('ddp_sharded_spawn', DDPSpawnShardedPlugin),
pytest.param('deepspeed', DeepSpeedPlugin, marks=RunIf(deepspeed=True)),
])
@mock.patch('torch.cuda.is_available', return_value=True)
@mock.patch('torch.cuda.device_count', return_value=2)
def test_accelerator_choice_multi_node_gpu(
mock_is_available, mock_device_count, tmpdir, accelerator: str, plugin: ParallelPlugin
):
trainer = Trainer(
accelerator=accelerator,
default_root_dir=tmpdir,
num_nodes=2,
gpus=2,
)
assert isinstance(trainer.training_type_plugin, plugin)
| [
"torch.device"
] | 1.4 | alanhdu/pytorch-lightning | b7a22ba046ba57072a71b12d16caff000e66f798 |
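The accelerator-choice tests above never launch real clusters: they simulate SLURM, TorchElastic, and multi-GPU machines purely through patched environment variables and patched torch.cuda queries. A minimal sketch of that mocking pattern in isolation, with illustrative environment values:

import os
import torch
from unittest import mock

@mock.patch.dict(os.environ, {"SLURM_NTASKS": "2", "SLURM_NODEID": "0", "SLURM_LOCALID": "0"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_pretend_slurm_with_two_gpus(is_available_mock, device_count_mock):
    # Inside the test the process "sees" a 2-GPU SLURM task even on a CPU-only box;
    # patch.dict and the patch decorators undo everything when the test returns.
    assert torch.cuda.is_available()
    assert torch.cuda.device_count() == 2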
1.4 |
from typing import Callable, Tuple
import numpy as np
import torch
import torchvision
import foolbox as fb
import time
from tqdm import tqdm
from models.base import AdversarialDefensiveModule
from .base import AdversaryForValid
from .config import *
from .utils import getLogger, mkdirs
class ModelNotDefineError(Exception): pass
class LossNotDefineError(Exception): pass
class OptimNotIncludeError(Exception): pass
class AttackNotIncludeError(Exception): pass
class DatasetNotIncludeError(Exception): pass
# return the number of classes of the corresponding dataset
def get_num_classes(dataset_type: str) -> int:
if dataset_type in ('mnist', 'fashionmnist', 'svhn', 'cifar10'):
return 10
elif dataset_type in ('cifar100', ):
return 100
elif dataset_type in ('tinyimagenet', ):
return 200
else:
raise DatasetNotIncludeError("Dataset {0} is not included." \
"Refer to the following: {1}".format(dataset_type, _dataset.__doc__))
def load_model(model_type: str) -> Callable[..., torch.nn.Module]:
"""
mnist: the model designed for MNIST dataset
cifar: the model designed for CIFAR dataset
resnet8|20|32|44|56|110|1202
resnet18|34|50|101, resnext50_32x4d
preactresnet18|34|50|101
wrn_28_10: depth-28, width-10
wrn_34_10: depth-34, width-10
wrn_34_20: depth-34, width-20
"""
resnets = ['resnet8', 'resnet20', 'resnet32', 'resnet44',
'resnet56', 'resnet110', 'resnet1202']
srns = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnext50_32x4d']
prns = ['preactresnet18', 'preactresnet34', 'preactresnet50', 'preactresnet101']
wrns = ['wrn_28_10', 'wrn_34_10', 'wrn_34_20']
model: Callable[..., AdversarialDefensiveModule]
if model_type == "mnist":
from models.mnist import MNIST
model = MNIST
elif model_type == "cifar":
from models.cifar import CIFAR
model = CIFAR
elif model_type in resnets:
import models.resnet as resnet
model = getattr(resnet, model_type)
elif model_type in srns:
import models.cifar_resnet as srn
model = getattr(srn, model_type)
elif model_type in prns:
import models.preactresnet as prn
model = getattr(prn, model_type)
elif model_type in wrns:
import models.wide_resnet as wrn
model = getattr(wrn, model_type)
else:
raise ModelNotDefineError(f"model {model_type} is not defined.\n" \
f"Refer to the following: {load_model.__doc__}\n")
return model
def load_loss_func(loss_type: str) -> Callable:
"""
cross_entropy: the cross entropy loss with logits
cross_entropy_softmax: the cross entropy loss with probs
kl_loss: kl divergence
mse_loss: MSE
"""
loss_func: Callable[..., torch.Tensor]
if loss_type == "cross_entropy":
from .loss_zoo import cross_entropy
loss_func = cross_entropy
elif loss_type == "cross_entropy_softmax":
from .loss_zoo import cross_entropy_softmax
loss_func = cross_entropy_softmax
elif loss_type == "kl_loss":
from .loss_zoo import kl_divergence
loss_func = kl_divergence
elif loss_type == "mse_loss":
from .loss_zoo import mse_loss
loss_func = mse_loss
else:
raise LossNotDefineError(f"Loss {loss_type} is not defined.\n" \
f"Refer to the following: {load_loss_func.__doc__}")
return loss_func
def _dataset(
dataset_type: str,
train: bool = True
) -> torch.utils.data.Dataset:
"""
Dataset:
mnist: MNIST
fashionmnist: FashionMNIST
svhn: SVHN
cifar10: CIFAR-10
cifar100: CIFAR-100
tinyimagenet: Tiny ImageNet 200
"""
if dataset_type == "mnist":
dataset = torchvision.datasets.MNIST(
root=ROOT, train=train, download=DOWNLOAD
)
elif dataset_type == "fashionmnist":
dataset = torchvision.datasets.FashionMNIST(
root=ROOT, train=train, download=DOWNLOAD
)
elif dataset_type == "svhn":
split = 'train' if train else 'test'
dataset = torchvision.datasets.SVHN(
root=ROOT, split=split, download=DOWNLOAD
)
elif dataset_type == "cifar10":
dataset = torchvision.datasets.CIFAR10(
root=ROOT, train=train, download=DOWNLOAD
)
elif dataset_type == "cifar100":
dataset = torchvision.datasets.CIFAR100(
root=ROOT, train=train, download=DOWNLOAD
)
elif dataset_type == "tinyimagenet":
from src.datasets import TinyImageNet
split = 'train' if train else 'val'
dataset = TinyImageNet(root=ROOT, split=split)
else:
raise DatasetNotIncludeError("Dataset {0} is not included." \
"Refer to the following: {1}".format(dataset_type, _dataset.__doc__))
return dataset
def load_normalizer(dataset_type: str, ndim: int = 3) -> Tuple[torch.Tensor]:
size = (-1,) + (1,) * (ndim - 1)
mean = MEANS[dataset_type]
std = STDS[dataset_type]
mean = torch.tensor(mean).view(size)
std = torch.tensor(std).view(size)
return mean, std
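# The (-1, 1, 1) view above makes the per-channel statistics broadcast over an
# image batch; a minimal usage sketch (assumes MEANS/STDS in .config define 'cifar10'):
def _normalizer_broadcast_sketch() -> None:
    mean, std = load_normalizer("cifar10")     # each of shape (3, 1, 1)
    images = torch.rand(8, 3, 32, 32)
    normalized = (images - mean) / std         # broadcasts over (N, C, H, W)
    assert normalized.shape == images.shape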
def _split_dataset(
dataset: torch.utils.data.Dataset,
ratio: float = .1, seed: int = VALIDSEED,
shuffle: bool = True
) -> Tuple[torch.utils.data.Dataset]:
from torch.utils.data import Subset
datasize = len(dataset)
indices = list(range(datasize))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
validsize = int(ratio * datasize)
getLogger().info(f"[Dataset] Split the dataset into trainset({datasize-validsize}) and validset({validsize}) ...")
train_indices, valid_indices = indices[validsize:], indices[:validsize]
trainset = Subset(dataset, train_indices)
validset = Subset(dataset, valid_indices)
return trainset, validset
def load_dataset(
dataset_type: str,
transforms: str ='default',
ratio: float = 0.1,
seed: int = VALIDSEED,
shuffle: bool = True,
train: bool = True
) -> torch.utils.data.Dataset:
from .datasets import WrapperSet
dataset = _dataset(dataset_type, train)
if train:
transforms = TRANSFORMS[dataset_type] if transforms == 'default' else transforms
getLogger().info(f"[Dataset] Apply transforms of '{transforms}' to trainset ...")
trainset, validset = _split_dataset(dataset, ratio, seed, shuffle)
trainset = WrapperSet(trainset, transforms=transforms)
validset = WrapperSet(validset, transforms=TRANSFORMS['validation'])
return trainset, validset
else:
getLogger().info(f"[Dataset] Apply transforms of '{transforms}' to testset ...")
testset = WrapperSet(dataset, transforms=transforms)
return testset
class _TQDMDataLoader(torch.utils.data.DataLoader):
def __iter__(self):
return iter(
tqdm(
super(_TQDMDataLoader, self).__iter__(),
leave=False, desc="վ'ᴗ' ի-"
)
)
def load_dataloader(
dataset: torch.utils.data.Dataset,
batch_size: int,
train: bool = True,
show_progress: bool = False
) -> torch.utils.data.DataLoader:
dataloader = _TQDMDataLoader if show_progress else torch.utils.data.DataLoader
if train:
loader = dataloader(
dataset, batch_size=batch_size, shuffle=True,
num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY
)
else:
loader = dataloader(
dataset, batch_size=batch_size, shuffle=False,
num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY
)
return loader
def load_optimizer(
model: torch.nn.Module,
optim_type: str, *,
lr: float = 0.1, momentum: float = 0.9,
betas: Tuple[float, float] = (0.9, 0.999),
weight_decay: float = 1e-4,
nesterov: bool = False,
**kwargs: "other hyper-parameters for optimizer"
) -> torch.optim.Optimizer:
"""
sgd: SGD
adam: Adam
"""
try:
cfg = OPTIMS[optim_type]
except KeyError:
raise OptimNotIncludeError(f"Optim {optim_type} is not included.\n" \
f"Refer to the following: {load_optimizer.__doc__}")
kwargs.update(lr=lr, momentum=momentum, betas=betas,
weight_decay=weight_decay, nesterov=nesterov)
cfg.update(**kwargs) # update the kwargs needed automatically
logger = getLogger()
logger.info(cfg)
if optim_type == "sgd":
optim = torch.optim.SGD(model.parameters(), **cfg)
elif optim_type == "adam":
optim = torch.optim.Adam(model.parameters(), **cfg)
return optim
def load_learning_policy(
optimizer: torch.optim.Optimizer,
learning_policy_type: str,
**kwargs: "other hyper-parameters for learning scheduler"
) -> "learning policy":
"""
default: (100, 105), 110 epochs suggested
null:
STD: (82, 123), 164 epochs suggested
STD-wrn: (60, 120, 160), 200 epochs suggested
AT: (102, 154), 200 epochs suggested
TRADES: (75, 90, 100), 76 epochs suggested
TRADES-M: (55, 75, 90), 100 epochs suggested
cosine: CosineAnnealingLR, kwargs: T_max, eta_min, last_epoch
"""
try:
learning_policy_ = LEARNING_POLICY[learning_policy_type]
except KeyError:
raise NotImplementedError(f"Learning_policy {learning_policy_type} is not defined.\n" \
f"Refer to the following: {load_learning_policy.__doc__}")
lp_type = learning_policy_[0]
lp_cfg = learning_policy_[1]
lp_cfg.update(**kwargs) # update the kwargs needed automatically
logger = getLogger()
logger.info(f"{lp_cfg} {lp_type}")
learning_policy = getattr(
torch.optim.lr_scheduler,
lp_type
)(optimizer, **lp_cfg)
return learning_policy
def load_fb_attack(attack_type: str, steps: int, stepsize: float) -> fb.attacks.Attack:
"""
pgd-linf: \ell_{\infty} rel_stepsize=stepsize, steps=steps;
pgd-l1: \ell_1 version;
pgd-l2: \ell_2 version;
fgsm: no hyper-parameters;
cw-l2: stepsize=stepsize, steps=steps;
ead: initial_stepsize=stepsize, steps=steps;
slide: \ell_1 attack, rel_stepsize=stepsize, steps=steps;
deepfool-linf: \ell_{\infty} version, overshoot=stepsize, steps=steps;
deepfool-l2: \ell_2 version;
bba-linf: \ell_{\infty} version, lr=stepsize, steps=steps, overshoot=1.1;
bba-l1: \ell_1 version;
bba-l2: \ell_2 version
"""
attack: fb.attacks.Attack
if attack_type == "pgd-linf":
attack = fb.attacks.LinfPGD(
rel_stepsize=stepsize,
steps=steps
)
elif attack_type == "pgd-l2":
attack = fb.attacks.L2PGD(
rel_stepsize=stepsize,
steps=steps
)
elif attack_type == "pgd-l1":
attack = fb.attacks.L1PGD(
rel_stepsize=stepsize,
steps=steps
)
elif attack_type == "fgsm":
attack = fb.attacks.LinfFastGradientAttack(
random_start=False
)
elif attack_type == "cw-l2":
attack = fb.attacks.L2CarliniWagnerAttack(
stepsize=stepsize,
steps=steps
)
elif attack_type == "ead":
attack = fb.attacks.EADAttack(
initial_stepsize=stepsize,
steps=steps
)
elif attack_type == "slide":
attack = fb.attacks.SparseL1DescentAttack(
rel_stepsize=stepsize,
steps=steps
)
elif attack_type == "deepfool-linf":
attack = fb.attacks.LinfDeepFoolAttack(
overshoot=stepsize,
steps=steps
)
elif attack_type == "deepfool-l2":
attack = fb.attacks.L2DeepFoolAttack(
overshoot=stepsize,
steps=steps
)
elif attack_type == "bba-linf":
attack = fb.attacks.LinfinityBrendelBethgeAttack(
lr=stepsize,
steps=steps
)
elif attack_type == "bba-l2":
attack = fb.attacks.L2BrendelBethgeAttack(
lr=stepsize,
steps=steps
)
elif attack_type == "bba-l1":
attack = fb.attacks.L1BrendelBethgeAttack(
lr=stepsize,
steps=steps
)
else:
raise AttackNotIncludeError(f"Attack {attack_type} is not included.\n" \
f"Refer to the following: {load_fb_attack.__doc__}")
return attack
def load_attack(
attack_type: str, epsilon: float,
steps: int, stepsize: float,
random_start: bool = True, bounds: Tuple[float] = BOUNDS
) -> Callable:
'''
pgd-linf: \ell_{\infty};
pgd-l2: \ell_2 version;
pgd-linf-kl: \ell_{\infty} with kl divergence
pgd-l2-kl: \ell_2 with kl divergence
'''
if attack_type == 'pgd-linf':
from .attacks import LinfPGD
attack = LinfPGD
elif attack_type == 'pgd-l2':
from .attacks import L2PGD
attack = L2PGD
elif attack_type == 'pgd-linf-kl':
from .attacks import LinfPGDKLdiv
attack = LinfPGDKLdiv
elif attack_type == 'pgd-l2-kl':
from .attacks import L2PGDKLdiv
attack = L2PGDKLdiv
else:
raise AttackNotIncludeError(f"Attack {attack_type} is not included.\n" \
f"Refer to the following: {load_attack.__doc__}")
attack = attack(
epsilon=epsilon, steps=steps, stepsize=stepsize,
random_start=random_start, bounds=bounds
)
return attack
def load_valider(
model: torch.nn.Module, dataset_type: str, device: torch.device = DEVICE,
) -> AdversaryForValid:
cfg = VALIDER[dataset_type]
attack = load_attack(**cfg)
valider = AdversaryForValid(
model=model, attacker=attack, device=device
)
return valider
def generate_path(
method: str, dataset_type: str, model:str, description: str
) -> Tuple[str, str]:
info_path = INFO_PATH.format(
method=method,
dataset=dataset_type,
model=model,
description=description
)
log_path = LOG_PATH.format(
method=method,
dataset=dataset_type,
model=model,
description=description,
time=time.strftime(TIMEFMT)
)
mkdirs(info_path, log_path)
return info_path, log_path
| [
"torch.utils.data.Subset",
"torch.tensor"
] | 1.4.0 | MTandHJ/PyTorch-Robust | 3f046fce515a7ed66ab34079329cd3496ca5087c |
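The file above is a registry-style loader module: each helper maps a string key onto a model, optimizer, dataset, scheduler, or attack. A hedged sketch of chaining those helpers into a training setup; the num_classes keyword and the 'sgd'/'default' entries in the OPTIMS/LEARNING_POLICY config tables are assumptions, not guaranteed by the excerpt:

model = load_model("resnet18")(num_classes=get_num_classes("cifar10"))
optimizer = load_optimizer(model, "sgd", lr=0.1, weight_decay=5e-4)
scheduler = load_learning_policy(optimizer, "default")
trainset, validset = load_dataset("cifar10", train=True)
train_loader = load_dataloader(trainset, batch_size=128, train=True)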
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Mapping, Optional, Sequence, Union
import torch
import torch.nn.functional as F
import torchmetrics
from pytorch_lightning.utilities import rank_zero_warn
from flash.core.data.data_source import LabelsState
from flash.core.data.process import Serializer
from flash.core.model import Task
def binary_cross_entropy_with_logits(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""Calls BCE with logits and cast the target one_hot (y) encoding to floating point precision."""
return F.binary_cross_entropy_with_logits(x, y.float())
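# Why the cast matters (an illustrative sketch, not part of the original file): one-hot
# targets usually arrive as integer tensors, but F.binary_cross_entropy_with_logits
# expects float targets with the same shape as the logits.
def _float_target_sketch() -> torch.Tensor:
    logits = torch.randn(2, 5)
    targets = torch.randint(0, 2, (2, 5))                       # int64 multi-hot targets
    return binary_cross_entropy_with_logits(logits, targets)    # works because of y.float()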
class ClassificationTask(Task):
def __init__(
self,
*args,
loss_fn: Optional[Callable] = None,
metrics: Union[torchmetrics.Metric, Mapping, Sequence, None] = None,
multi_label: bool = False,
serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,
**kwargs,
) -> None:
if metrics is None:
metrics = torchmetrics.Accuracy(subset_accuracy=multi_label)
if loss_fn is None:
loss_fn = binary_cross_entropy_with_logits if multi_label else F.cross_entropy
super().__init__(
*args,
loss_fn=loss_fn,
metrics=metrics,
serializer=serializer or Classes(multi_label=multi_label),
**kwargs,
)
def to_metrics_format(self, x: torch.Tensor) -> torch.Tensor:
if getattr(self.hparams, "multi_label", False):
return torch.sigmoid(x)
# we'll assume that the data always comes as `(B, C, ...)`
return torch.softmax(x, dim=1)
class ClassificationSerializer(Serializer):
"""A base class for classification serializers.
Args:
multi_label: If true, treats outputs as multi label logits.
"""
def __init__(self, multi_label: bool = False):
super().__init__()
self._multi_label = multi_label
@property
def multi_label(self) -> bool:
return self._multi_label
class Logits(ClassificationSerializer):
"""A :class:`.Serializer` which simply converts the model outputs (assumed to be logits) to a list."""
def serialize(self, sample: Any) -> Any:
return sample.tolist()
class Probabilities(ClassificationSerializer):
"""A :class:`.Serializer` which applies a softmax to the model outputs (assumed to be logits) and converts to a
list."""
def serialize(self, sample: Any) -> Any:
if self.multi_label:
return torch.sigmoid(sample).tolist()
return torch.softmax(sample, -1).tolist()
class Classes(ClassificationSerializer):
"""A :class:`.Serializer` which applies an argmax to the model outputs (either logits or probabilities) and
converts to a list.
Args:
multi_label: If true, treats outputs as multi label logits.
threshold: The threshold to use for multi_label classification.
"""
def __init__(self, multi_label: bool = False, threshold: float = 0.5):
super().__init__(multi_label)
self.threshold = threshold
def serialize(self, sample: Any) -> Union[int, List[int]]:
if self.multi_label:
one_hot = (sample.sigmoid() > self.threshold).int().tolist()
result = []
for index, value in enumerate(one_hot):
if value == 1:
result.append(index)
return result
return torch.argmax(sample, -1).tolist()
class Labels(Classes):
"""A :class:`.Serializer` which converts the model outputs (either logits or probabilities) to the label of the
argmax classification.
Args:
labels: A list of labels, assumed to map the class index to the label for that class. If ``labels`` is not
provided, will attempt to get them from the :class:`.LabelsState`.
multi_label: If true, treats outputs as multi label logits.
threshold: The threshold to use for multi_label classification.
"""
def __init__(self, labels: Optional[List[str]] = None, multi_label: bool = False, threshold: float = 0.5):
super().__init__(multi_label=multi_label, threshold=threshold)
self._labels = labels
if labels is not None:
self.set_state(LabelsState(labels))
def serialize(self, sample: Any) -> Union[int, List[int], str, List[str]]:
labels = None
if self._labels is not None:
labels = self._labels
else:
state = self.get_state(LabelsState)
if state is not None:
labels = state.labels
classes = super().serialize(sample)
if labels is not None:
if self.multi_label:
return [labels[cls] for cls in classes]
return labels[classes]
else:
rank_zero_warn("No LabelsState was found, this serializer will act as a Classes serializer.", UserWarning)
return classes
| [
"torch.sigmoid",
"torch.argmax",
"torch.softmax"
] | 1.7 | Site-Command/lightning-flash | bfff08ded9cf193cce1cd16e7034d8005de172ae |
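A short worked example of the serializers above on raw logits for three classes (illustrative usage, not from the source file). With logits [2.0, -1.0, 0.3], the sigmoid is roughly [0.88, 0.27, 0.57], so indices 0 and 2 clear the default 0.5 threshold, while plain argmax picks index 0:

import torch

logits = torch.tensor([2.0, -1.0, 0.3])

Classes().serialize(logits)                        # 0: argmax over the logits
Classes(multi_label=True).serialize(logits)        # [0, 2]: indices whose sigmoid > 0.5
Labels(["cat", "dog", "bird"]).serialize(logits)   # "cat"
Probabilities().serialize(logits)                  # softmax over the logits, as a list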
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import pytest
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from flash import Trainer
from flash.core.finetuning import NoFreeze
from flash.core.utilities.imports import _TORCHVISION_AVAILABLE
from flash.image.classification import ImageClassifier
class DummyDataset(torch.utils.data.Dataset):
def __getitem__(self, index: int) -> Any:
return {"input": torch.rand(3, 64, 64), "target": torch.randint(10, size=(1, )).item()}
def __len__(self) -> int:
return 100
@pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="torchvision isn't installed.")
@pytest.mark.parametrize(
"strategy", ['no_freeze', 'freeze', 'freeze_unfreeze', 'unfreeze_milestones', None, 'cls', 'chocolat']
)
def test_finetuning(tmpdir: str, strategy):
train_dl = torch.utils.data.DataLoader(DummyDataset())
val_dl = torch.utils.data.DataLoader(DummyDataset())
task = ImageClassifier(10, backbone="resnet18")
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
if strategy == "cls":
strategy = NoFreeze()
if strategy == 'chocolat' or strategy is None:
with pytest.raises(MisconfigurationException, match="strategy should be provided"):
trainer.finetune(task, train_dl, val_dl, strategy=strategy)
else:
trainer.finetune(task, train_dl, val_dl, strategy=strategy)
| [
"torch.rand",
"torch.randint"
] | 1.7 | Site-Command/lightning-flash | bfff08ded9cf193cce1cd16e7034d8005de172ae |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import math
import numpy as np
from typing import Tuple, Optional, Sequence
import torch
import torch.nn.functional as F
from pytorch3d.transforms import Rotate, Transform3d, Translate
from .utils import TensorProperties, convert_to_tensors_and_broadcast
# Default values for rotation and translation matrices.
r = np.expand_dims(np.eye(3), axis=0) # (1, 3, 3)
t = np.expand_dims(np.zeros(3), axis=0) # (1, 3)
class OpenGLPerspectiveCameras(TensorProperties):
"""
A class which stores a batch of parameters to generate a batch of
projection matrices using the OpenGL convention for a perspective camera.
The extrinsics of the camera (R and T matrices) can also be set in the
initializer or passed in to `get_full_projection_transform` to get
the full transformation from world -> screen.
The `transform_points` method calculates the full world -> screen transform
and then applies it to the input points.
The transforms can also be returned separately as Transform3d objects.
"""
def __init__(
self,
znear=1.0,
zfar=100.0,
aspect_ratio=1.0,
fov=60.0,
degrees: bool = True,
R=r,
T=t,
device="cpu",
):
"""
__init__(self, znear, zfar, aspect_ratio, fov, degrees, R, T, device) -> None # noqa
Args:
znear: near clipping plane of the view frustum.
zfar: far clipping plane of the view frustum.
aspect_ratio: ratio of screen_width/screen_height.
fov: field of view angle of the camera.
degrees: bool, set to True if fov is specified in degrees.
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
device: torch.device or string
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
znear=znear,
zfar=zfar,
aspect_ratio=aspect_ratio,
fov=fov,
R=R,
T=T,
)
# No need to convert to tensor or broadcast.
self.degrees = degrees
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the OpenGL perspective projection matrix with a symmetric
viewing frustum. Use column major order.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in `__init__`.
Return:
P: a Transform3d object which represents a batch of projection
matrices of shape (N, 3, 3)
.. code-block:: python
f1 = -(far + near)/(far - near)
f2 = -2*far*near/(far-near)
h1 = (top + bottom)/(top - bottom)
w1 = (right + left)/(right - left)
tanhalffov = tan((fov/2))
s1 = 1/tanhalffov
s2 = 1/(tanhalffov * (aspect_ratio))
P = [
[s1, 0, w1, 0],
[0, s2, h1, 0],
[0, 0, f1, f2],
[0, 0, -1, 0],
]
"""
znear = kwargs.get("znear", self.znear) # pyre-ignore[16]
zfar = kwargs.get("zfar", self.zfar) # pyre-ignore[16]
fov = kwargs.get("fov", self.fov) # pyre-ignore[16]
# pyre-ignore[16]
aspect_ratio = kwargs.get("aspect_ratio", self.aspect_ratio)
degrees = kwargs.get("degrees", self.degrees)
P = torch.zeros(
(self._N, 4, 4), device=self.device, dtype=torch.float32
)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
if degrees:
fov = (np.pi / 180) * fov
if not torch.is_tensor(fov):
fov = torch.tensor(fov, device=self.device)
tanHalfFov = torch.tan((fov / 2))
top = tanHalfFov * znear
bottom = -top
right = top * aspect_ratio
left = -right
# NOTE: In OpenGL the projection matrix changes the handedness of the
# coordinate frame. i.e. the NDC space positive z direction is the
# camera space negative z direction. This is because the sign of the z
# in the projection matrix is set to -1.0.
# In pytorch3d we maintain a right handed coordinate system throughout
# so the z sign is 1.0.
z_sign = 1.0
P[:, 0, 0] = 2.0 * znear / (right - left)
P[:, 1, 1] = 2.0 * znear / (top - bottom)
P[:, 0, 2] = (right + left) / (right - left)
P[:, 1, 2] = (top + bottom) / (top - bottom)
P[:, 3, 2] = z_sign * ones
# NOTE: This part of the matrix is for z renormalization in OpenGL
# which maps the z to [-1, 1]. This won't work yet as the torch3d
# rasterizer ignores faces which have z < 0.
# P[:, 2, 2] = z_sign * (far + near) / (far - near)
# P[:, 2, 3] = -2.0 * far * near / (far - near)
# P[:, 3, 2] = z_sign * torch.ones((N))
# NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point
# is at the near clipping plane and z = 1 when the point is at the far
# clipping plane. This replaces the OpenGL z normalization to [-1, 1]
# until rasterization is changed to clip at z = -1.
P[:, 2, 2] = z_sign * zfar / (zfar - znear)
P[:, 2, 3] = -(zfar * znear) / (zfar - znear)
# OpenGL uses column vectors so need to transpose the projection matrix
# as torch3d uses row vectors.
transform = Transform3d(device=self.device)
transform._matrix = P.transpose(1, 2).contiguous()
return transform
def clone(self):
other = OpenGLPerspectiveCameras(device=self.device)
return super().clone(other)
def get_camera_center(self, **kwargs):
"""
Return the 3D location of the camera optical center
in the world coordinates.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting T here will update the values set in init as this
value may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
C: a batch of 3D locations of shape (N, 3) denoting
the locations of the center of each camera in the batch.
"""
w2v_trans = self.get_world_to_view_transform(**kwargs)
P = w2v_trans.inverse().get_matrix()
# the camera center is the translation component (the first 3 elements
# of the last row) of the inverted world-to-view
# transform (4x4 RT matrix)
C = P[:, 3, :3]
return C
def get_world_to_view_transform(self, **kwargs) -> Transform3d:
"""
Return the world-to-view transform.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
T: a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
"""
Return the full world-to-screen transform composing the
world-to-view and view-to-screen transforms.
Args:
**kwargs: parameters for the projection transforms can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
T: a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
def transform_points(self, points, **kwargs) -> torch.Tensor:
"""
Transform input points from world to screen space.
Args:
points: torch tensor of shape (..., 3).
Returns:
new_points: transformed points with the same shape as the input.
"""
world_to_screen_transform = self.get_full_projection_transform(**kwargs)
return world_to_screen_transform.transform_points(points)
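# A usage sketch for the class above (an illustrative addition, not part of the
# original module): build a batch of cameras and project world-space points.
def _opengl_perspective_sketch() -> torch.Tensor:
    cameras = OpenGLPerspectiveCameras(znear=0.1, zfar=100.0, fov=60.0)
    points = torch.rand(1, 8, 3)                 # (N, P, 3) world coordinates
    return cameras.transform_points(points)      # same shape, now in screen space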
class OpenGLOrthographicCameras(TensorProperties):
"""
A class which stores a batch of parameters to generate a batch of
transformation matrices using the OpenGL convention for orthographic camera.
"""
def __init__(
self,
znear=1.0,
zfar=100.0,
top=1.0,
bottom=-1.0,
left=-1.0,
right=1.0,
scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)
R=r,
T=t,
device="cpu",
):
"""
__init__(self, znear, zfar, top, bottom, left, right, scale_xyz, R, T, device) -> None # noqa
Args:
znear: near clipping plane of the view frustum.
zfar: far clipping plane of the view frustum.
top: position of the top of the screen.
bottom: position of the bottom of the screen.
left: position of the left of the screen.
right: position of the right of the screen.
scale_xyz: scale factors for each axis of shape (N, 3).
R: Rotation matrix of shape (N, 3, 3).
T: Translation of shape (N, 3).
device: torch.device or string.
Only need to set left, right, top, bottom for viewing frustums
which are not symmetric about the origin.
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
znear=znear,
zfar=zfar,
top=top,
bottom=bottom,
left=left,
right=right,
scale_xyz=scale_xyz,
R=R,
T=T,
)
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the OpenGL orthographic projection matrix.
Use column major order.
Args:
**kwargs: parameters for the projection can be passed in to
override the default values set in __init__.
Return:
P: a Transform3d object which represents a batch of projection
matrices of shape (N, 3, 3)
.. code-block:: python
scale_x = 2/(right - left)
scale_y = 2/(top - bottom)
scale_z = 2/(far-near)
mid_x = (right + left)/(right - left)
mid_y = (top + bottom)/(top - bottom)
mid_z = (far + near)/(far - near)
P = [
[scale_x, 0, 0, -mid_x],
[0, scale_y, 0, -mid_y],
[0, 0, -scale_z, -mid_z],
[0, 0, 0, 1],
]
"""
znear = kwargs.get("znear", self.znear) # pyre-ignore[16]
zfar = kwargs.get("zfar", self.zfar) # pyre-ignore[16]
left = kwargs.get("left", self.left) # pyre-ignore[16]
right = kwargs.get("right", self.right) # pyre-ignore[16]
top = kwargs.get("top", self.top) # pyre-ignore[16]
bottom = kwargs.get("bottom", self.bottom) # pyre-ignore[16]
scale_xyz = kwargs.get("scale_xyz", self.scale_xyz) # pyre-ignore[16]
P = torch.zeros(
(self._N, 4, 4), dtype=torch.float32, device=self.device
)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
# NOTE: OpenGL flips handedness of coordinate system between camera
# space and NDC space so z sign is -ve. In PyTorch3D we maintain a
# right handed coordinate system throughout.
z_sign = +1.0
P[:, 0, 0] = (2.0 / (right - left)) * scale_xyz[:, 0]
P[:, 1, 1] = (2.0 / (top - bottom)) * scale_xyz[:, 1]
P[:, 0, 3] = -(right + left) / (right - left)
P[:, 1, 3] = -(top + bottom) / (top - bottom)
P[:, 3, 3] = ones
# NOTE: This maps the z coordinate to the range [0, 1] and replaces the
# the OpenGL z normalization to [-1, 1]
P[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2]
P[:, 2, 3] = -znear / (zfar - znear)
# NOTE: This part of the matrix is for z renormalization in OpenGL.
# The z is mapped to the range [-1, 1] but this won't work yet in
# pytorch3d as the rasterizer ignores faces which have z < 0.
# P[:, 2, 2] = z_sign * (2.0 / (far - near)) * scale[:, 2]
# P[:, 2, 3] = -(far + near) / (far - near)
transform = Transform3d(device=self.device)
transform._matrix = P.transpose(1, 2).contiguous()
return transform
def clone(self):
other = OpenGLOrthographicCameras(device=self.device)
return super().clone(other)
def get_camera_center(self, **kwargs):
"""
Return the 3D location of the camera optical center
in the world coordinates.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting T here will update the values set in init as this
value may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
C: a batch of 3D locations of shape (N, 3) denoting
the locations of the center of each camera in the batch.
"""
w2v_trans = self.get_world_to_view_transform(**kwargs)
P = w2v_trans.inverse().get_matrix()
# The camera center is the translation component (the first 3 elements
# of the last row) of the inverted world-to-view
# transform (4x4 RT matrix).
C = P[:, 3, :3]
return C
def get_world_to_view_transform(self, **kwargs) -> Transform3d:
"""
Return the world-to-view transform.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
T: a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
"""
Return the full world-to-screen transform composing the
world-to-view and view-to-screen transforms.
Args:
**kwargs: parameters for the projection transforms can be passed in
as keyword arguments to override the default values
set in `__init__`.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
T: a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
def transform_points(self, points, **kwargs) -> torch.Tensor:
"""
Transform input points from world to screen space.
Args:
points: torch tensor of shape (..., 3).
Returns:
new_points: transformed points with the same shape as the input.
"""
world_to_screen_transform = self.get_full_projection_transform(**kwargs)
return world_to_screen_transform.transform_points(points)
class SfMPerspectiveCameras(TensorProperties):
"""
A class which stores a batch of parameters to generate a batch of
transformation matrices using the multi-view geometry convention for
perspective camera.
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None
Args:
focal_length: Focal length of the camera in world units.
A tensor of shape (N, 1) or (N, 2) for
square and non-square pixels respectively.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
A tensor of shape (N, 2).
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
device: torch.device or string
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
)
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the projection matrix using the
multi-view geometry convention.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in __init__.
Returns:
P: a batch of projection matrices of shape (N, 4, 4)
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
P = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
"""
# pyre-ignore[16]
principal_point = kwargs.get("principal_point", self.principal_point)
# pyre-ignore[16]
focal_length = kwargs.get("focal_length", self.focal_length)
P = _get_sfm_calibration_matrix(
self._N, self.device, focal_length, principal_point, False
)
transform = Transform3d(device=self.device)
transform._matrix = P.transpose(1, 2).contiguous()
return transform
def clone(self):
other = SfMPerspectiveCameras(device=self.device)
return super().clone(other)
def get_camera_center(self, **kwargs):
"""
Return the 3D location of the camera optical center
in the world coordinates.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting T here will update the values set in init as this
value may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
C: a batch of 3D locations of shape (N, 3) denoting
the locations of the center of each camera in the batch.
"""
w2v_trans = self.get_world_to_view_transform(**kwargs)
P = w2v_trans.inverse().get_matrix()
# the camera center is the translation component (the first 3 elements
# of the last row) of the inverted world-to-view
# transform (4x4 RT matrix)
C = P[:, 3, :3]
return C
def get_world_to_view_transform(self, **kwargs) -> Transform3d:
"""
Return the world-to-view transform.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
T: a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
"""
Return the full world-to-screen transform composing the
world-to-view and view-to-screen transforms.
Args:
**kwargs: parameters for the projection transforms can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
def transform_points(self, points, **kwargs) -> torch.Tensor:
"""
Transform input points from world to screen space.
Args:
points: torch tensor of shape (..., 3).
Returns:
new_points: transformed points with the same shape as the input.
"""
world_to_screen_transform = self.get_full_projection_transform(**kwargs)
return world_to_screen_transform.transform_points(points)
class SfMOrthographicCameras(TensorProperties):
"""
A class which stores a batch of parameters to generate a batch of
transformation matrices using the multi-view geometry convention for
orthographic camera.
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None
Args:
focal_length: Focal length of the camera in world units.
A tensor of shape (N, 1) or (N, 2) for
square and non-square pixels respectively.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
A tensor of shape (N, 2).
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
device: torch.device or string
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
)
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the projection matrix using
the multi-view geometry convention.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in __init__.
Return:
P: a batch of projection matrices of shape (N, 4, 4)
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
P = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
"""
# pyre-ignore[16]
principal_point = kwargs.get("principal_point", self.principal_point)
# pyre-ignore[16]
focal_length = kwargs.get("focal_length", self.focal_length)
P = _get_sfm_calibration_matrix(
self._N, self.device, focal_length, principal_point, True
)
transform = Transform3d(device=self.device)
transform._matrix = P.transpose(1, 2).contiguous()
return transform
def clone(self):
other = SfMOrthographicCameras(device=self.device)
return super().clone(other)
def get_camera_center(self, **kwargs):
"""
Return the 3D location of the camera optical center
in the world coordinates.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting T here will update the values set in init as this
value may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
C: a batch of 3D locations of shape (N, 3) denoting
the locations of the center of each camera in the batch.
"""
w2v_trans = self.get_world_to_view_transform(**kwargs)
P = w2v_trans.inverse().get_matrix()
# the camera center is the translation component (the first 3 elements
# of the last row) of the inverted world-to-view
# transform (4x4 RT matrix)
C = P[:, 3, :3]
return C
def get_world_to_view_transform(self, **kwargs) -> Transform3d:
"""
Return the world-to-view transform.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
            T: a Transform3d object which represents a batch of transforms
            of shape (N, 4, 4)
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
"""
Return the full world-to-screen transform composing the
world-to-view and view-to-screen transforms.
Args:
**kwargs: parameters for the projection transforms can be passed in
as keyword arguments to override the default values
set in `__init__`.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
def transform_points(self, points, **kwargs) -> torch.Tensor:
"""
Transform input points from world to screen space.
Args:
points: torch tensor of shape (..., 3).
        Returns:
new_points: transformed points with the same shape as the input.
"""
world_to_screen_transform = self.get_full_projection_transform(**kwargs)
return world_to_screen_transform.transform_points(points)
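# Illustrative sketch (not part of the original module): project a point with a
# default-constructed SfMOrthographicCameras. The helper name and toy values are
# invented for demonstration; it assumes the module-level defaults r (identity
# rotation) and t (zero translation) used by __init__ above.
def _example_sfm_orthographic():
    cameras = SfMOrthographicCameras()
    points = torch.tensor([[[0.5, -0.25, 3.0]]])  # (N=1, P=1, 3) world-space point
    projected = cameras.transform_points(points)
    # With focal_length=1 and principal_point=(0, 0) the xy coordinates are
    # unchanged and z is carried through as depth: [[[0.5, -0.25, 3.0]]].
    return projected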
# SfMCameras helper
def _get_sfm_calibration_matrix(
N, device, focal_length, principal_point, orthographic: bool
) -> torch.Tensor:
"""
    Returns a calibration matrix of a perspective/orthographic camera.
Args:
N: Number of cameras.
focal_length: Focal length of the camera in world units.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
The calibration matrix `K` is set up as follows:
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
for orthographic==True:
K = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
else:
K = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
Returns:
A calibration matrix `K` of the SfM-conventioned camera
of shape (N, 4, 4).
"""
if not torch.is_tensor(focal_length):
focal_length = torch.tensor(focal_length, device=device)
if len(focal_length.shape) in (0, 1) or focal_length.shape[1] == 1:
fx = fy = focal_length
else:
fx, fy = focal_length.unbind(1)
if not torch.is_tensor(principal_point):
principal_point = torch.tensor(principal_point, device=device)
px, py = principal_point.unbind(1)
K = fx.new_zeros(N, 4, 4)
K[:, 0, 0] = fx
K[:, 1, 1] = fy
K[:, 0, 3] = px
K[:, 1, 3] = py
if orthographic:
K[:, 2, 2] = 1.0
K[:, 3, 3] = 1.0
else:
K[:, 3, 2] = 1.0
K[:, 2, 3] = 1.0
return K
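# Illustrative sketch (not part of the original module): build calibration
# matrices for a batch of two cameras with the helper above. The function name
# `_example_sfm_calibration` and the numbers are invented for demonstration.
def _example_sfm_calibration():
    focal = torch.tensor([[1.0, 1.2], [0.5, 0.5]])       # (N, 2) non-square pixels
    principal = torch.tensor([[0.0, 0.0], [0.1, -0.1]])  # (N, 2)
    K_ortho = _get_sfm_calibration_matrix(2, "cpu", focal, principal, True)
    K_persp = _get_sfm_calibration_matrix(2, "cpu", focal, principal, False)
    # Both are (2, 4, 4); they differ only in the last two rows (identity for the
    # orthographic case vs. the swap that copies depth into the homogeneous w).
    return K_ortho, K_persp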
################################################
# Helper functions for world to view transforms
################################################
def get_world_to_view_transform(R=r, T=t) -> Transform3d:
"""
This function returns a Transform3d representing the transformation
matrix to go from world space to view space by applying a rotation and
a translation.
Pytorch3d uses the same convention as Hartley & Zisserman.
I.e., for camera extrinsic parameters R (rotation) and T (translation),
we map a 3D point `X_world` in world coordinates to
a point `X_cam` in camera coordinates with:
`X_cam = X_world R + T`
Args:
R: (N, 3, 3) matrix representing the rotation.
T: (N, 3) matrix representing the translation.
Returns:
a Transform3d object which represents the composed RT transformation.
"""
# TODO: also support the case where RT is specified as one matrix
# of shape (N, 4, 4).
if T.shape[0] != R.shape[0]:
msg = "Expected R, T to have the same batch dimension; got %r, %r"
raise ValueError(msg % (R.shape[0], T.shape[0]))
if T.dim() != 2 or T.shape[1:] != (3,):
msg = "Expected T to have shape (N, 3); got %r"
raise ValueError(msg % repr(T.shape))
if R.dim() != 3 or R.shape[1:] != (3, 3):
msg = "Expected R to have shape (N, 3, 3); got %r"
raise ValueError(msg % repr(R.shape))
# Create a Transform3d object
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
return R.compose(T)
def camera_position_from_spherical_angles(
distance, elevation, azimuth, degrees: bool = True, device: str = "cpu"
) -> torch.Tensor:
"""
Calculate the location of the camera based on the distance away from
the target point, the elevation and azimuth angles.
Args:
distance: distance of the camera from the object.
elevation, azimuth: angles.
The inputs distance, elevation and azimuth can be one of the following
- Python scalar
- Torch scalar
- Torch tensor of shape (N) or (1)
degrees: bool, whether the angles are specified in degrees or radians.
device: str or torch.device, device for new tensors to be placed on.
The vectors are broadcast against each other so they all have shape (N, 1).
Returns:
camera_position: (N, 3) xyz location of the camera.
"""
broadcasted_args = convert_to_tensors_and_broadcast(
distance, elevation, azimuth, device=device
)
dist, elev, azim = broadcasted_args
if degrees:
elev = math.pi / 180.0 * elev
azim = math.pi / 180.0 * azim
x = dist * torch.cos(elev) * torch.sin(azim)
y = dist * torch.sin(elev)
z = dist * torch.cos(elev) * torch.cos(azim)
camera_position = torch.stack([x, y, z], dim=1)
if camera_position.dim() == 0:
camera_position = camera_position.view(1, -1) # add batch dim.
return camera_position.view(-1, 3)
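# Illustrative sketch (not part of the original module): place a camera 2.7 units
# from the origin at 10 degrees elevation and 20 degrees azimuth, then a batch of
# three azimuths. The helper name and values are invented for demonstration.
def _example_camera_position():
    single = camera_position_from_spherical_angles(2.7, 10.0, 20.0)  # (1, 3)
    batch = camera_position_from_spherical_angles(
        2.7, 10.0, torch.tensor([0.0, 90.0, 180.0])
    )  # (3, 3), dist and elev are broadcast against the azimuth batch
    return single, batch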
def look_at_rotation(
camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: str = "cpu"
) -> torch.Tensor:
"""
This function takes a vector 'camera_position' which specifies the location
of the camera in world coordinates and two vectors `at` and `up` which
indicate the position of the object and the up directions of the world
coordinate system respectively. The object is assumed to be centered at
the origin.
The output is a rotation matrix representing the transformation
from world coordinates -> view coordinates.
Args:
camera_position: position of the camera in world coordinates
at: position of the object in world coordinates
up: vector specifying the up direction in the world coordinate frame.
The inputs camera_position, at and up can each be a
- 3 element tuple/list
- torch tensor of shape (1, 3)
- torch tensor of shape (N, 3)
The vectors are broadcast against each other so they all have shape (N, 3).
Returns:
R: (N, 3, 3) batched rotation matrices
"""
# Format input and broadcast
broadcasted_args = convert_to_tensors_and_broadcast(
camera_position, at, up, device=device
)
camera_position, at, up = broadcasted_args
for t, n in zip([camera_position, at, up], ["camera_position", "at", "up"]):
if t.shape[-1] != 3:
msg = "Expected arg %s to have shape (N, 3); got %r"
raise ValueError(msg % (n, t.shape))
z_axis = F.normalize(at - camera_position, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
R = torch.cat(
(x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1
)
return R.transpose(1, 2)
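# Illustrative sketch (not part of the original module): the rotation returned by
# look_at_rotation is orthonormal, so R @ R^T is the identity. The helper name
# and the camera position are invented for demonstration.
def _example_look_at_rotation():
    cam_pos = torch.tensor([[0.0, 0.0, -3.0]])
    R = look_at_rotation(cam_pos)  # looks at the origin with +Y up by default
    assert torch.allclose(R.bmm(R.transpose(1, 2)), torch.eye(3)[None], atol=1e-5)
    return R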
def look_at_view_transform(
dist=1.0,
elev=0.0,
azim=0.0,
degrees: bool = True,
eye: Optional[Sequence] = None,
at=((0, 0, 0),), # (1, 3)
up=((0, 1, 0),), # (1, 3)
device="cpu",
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
This function returns a rotation and translation matrix
to apply the 'Look At' transformation from world -> view coordinates [0].
Args:
dist: distance of the camera from the object
        elev: angle in degrees or radians. This is the angle between the
            vector from the object to the camera, and the horizontal plane.
azim: angle in degrees or radians. The vector from the object to
            the camera is projected onto the horizontal plane y = 0.
azim is the angle between the projected vector and a
reference vector at (1, 0, 0) on the reference plane.
        dist, elev and azim can be of shape (1), (N).
degrees: boolean flag to indicate if the elevation and azimuth
angles are specified in degrees or radians.
        eye: the position of the camera(s) in world coordinates. If eye is not
            None, it will override the camera position derived from dist, elev, azim.
        up: the up direction in the world coordinate system.
at: the position of the object(s) in world coordinates.
eye, up and at can be of shape (1, 3) or (N, 3).
Returns:
2-element tuple containing
- **R**: the rotation to apply to the points to align with the camera.
- **T**: the translation to apply to the points to align with the camera.
References:
[0] https://www.scratchapixel.com
"""
if eye is not None:
broadcasted_args = convert_to_tensors_and_broadcast(
eye, at, up, device=device)
eye, at, up = broadcasted_args
C = eye
else:
broadcasted_args = convert_to_tensors_and_broadcast(
dist, elev, azim, at, up, device=device)
dist, elev, azim, at, up = broadcasted_args
C = camera_position_from_spherical_angles(
dist, elev, azim, degrees=degrees, device=device)
R = look_at_rotation(C, at, up, device=device)
T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0]
return R, T
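# Illustrative sketch (not part of the original module): the (R, T) pair returned
# by look_at_view_transform maps the camera center to the origin of view space.
# The helper name and the spherical coordinates are invented for demonstration.
def _example_look_at_view_transform():
    R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=20.0)
    w2v = get_world_to_view_transform(R=R, T=T)
    cam_center = camera_position_from_spherical_angles(2.7, 10.0, 20.0)  # (1, 3)
    # X_cam = X_world R + T, so the camera center lands (numerically) at the origin.
    assert w2v.transform_points(cam_center).abs().max() < 1e-4
    return R, T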
| [
"torch.zeros",
"torch.nn.functional.normalize",
"torch.cat",
"torch.cos",
"torch.stack",
"torch.tan",
"torch.is_tensor",
"torch.sin",
"torch.ones",
"torch.tensor",
"torch.cross"
] | 3 | nikhilaravi/pytorch3d-1 | 2480723adf1ce8a5cfca5c190f5fba7a48549f75 |
1.9 | import math
import time
import torch
from copy import deepcopy
from tensornet.engine.ops.regularizer import l1
from tensornet.engine.ops.checkpoint import ModelCheckpoint
from tensornet.engine.ops.tensorboard import TensorBoard
from tensornet.data.processing import InfiniteDataLoader
from tensornet.utils.progress_bar import ProgressBar
class Learner:
"""Model Trainer and Validator.
Args:
train_loader (torch.utils.data.DataLoader): Training data loader.
optimizer (torch.optim): Optimizer for the model.
criterion (torch.nn): Loss Function.
device (:obj:`str` or :obj:`torch.device`, optional): Device where the data
will be loaded. (default='cpu')
        epochs (:obj:`int`, optional): Number of epochs/iterations to train the model for.
            (default: 1)
l1_factor (:obj:`float`, optional): L1 regularization factor. (default: 0)
val_loader (:obj:`torch.utils.data.DataLoader`, optional): Validation data loader.
callbacks (:obj:`list`, optional): List of callbacks to be used during training.
metrics (:obj:`list`, optional): List of names of the metrics for model
evaluation.
*Note*: If the model has multiple outputs, then this will be a nested list
where each individual sub-list will specify the metrics which are to be used for
evaluating each output respectively. In such cases, the model checkpoint will
consider only the metric of the first output for saving checkpoints.
activate_loss_logits (:obj:`bool`, optional): If True, the logits will first pass
through the `activate_logits` function before going to the criterion.
(default: False)
record_train (:obj:`bool`, optional): If False, metrics will be calculated only
during validation. (default: True)
"""
def __init__(
self, train_loader, optimizer, criterion, device='cpu',
epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None,
activate_loss_logits=False, record_train=True
):
self.model = None
self.optimizer = optimizer
self.criterion = criterion
self.train_loader = train_loader
self.device = device
self.epochs = epochs
self.val_loader = val_loader
self.l1_factor = l1_factor
self.activate_loss_logits = activate_loss_logits
self.record_train = record_train
self.lr_schedulers = {
'step_lr': None,
'lr_plateau': None,
'one_cycle_policy': None,
'cyclic_lr': None,
}
self.checkpoint = None
self.summary_writer = None
if callbacks is not None:
self._setup_callbacks(callbacks)
# Training
self.train_losses = [] # Change in loss
self.train_metrics = [] # Change in evaluation metric
self.val_losses = [] # Change in loss
self.val_metrics = [] # Change in evaluation metric
# Set evaluation metrics
self.metrics = []
if metrics:
self._setup_metrics(metrics)
def _setup_callbacks(self, callbacks):
"""Extract callbacks passed to the class.
Args:
callbacks (list): List of callbacks.
"""
for callback in callbacks:
if isinstance(callback, torch.optim.lr_scheduler.StepLR):
self.lr_schedulers['step_lr'] = callback
elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau):
self.lr_schedulers['lr_plateau'] = callback
elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR):
self.lr_schedulers['one_cycle_policy'] = callback
elif isinstance(callback, ModelCheckpoint):
if callback.monitor.startswith('train_'):
if self.record_train:
self.checkpoint = callback
else:
raise ValueError(
'Cannot use checkpoint for a training metric if record_train is set to False'
)
else:
self.checkpoint = callback
elif isinstance(callback, TensorBoard):
self.summary_writer = callback
elif isinstance(callback, torch.optim.lr_scheduler.CyclicLR):
self.lr_schedulers['cyclic_lr'] = callback
def set_model(self, model):
"""Assign model to learner.
Args:
model (torch.nn.Module): Model Instance.
"""
self.model = model
if self.summary_writer is not None:
self.summary_writer.write_model(self.model)
def _accuracy(self, label, prediction, idx=0):
"""Calculate accuracy.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
self.metrics[idx]['accuracy']['sum'] += prediction.eq(
label.view_as(prediction)
).sum().item()
self.metrics[idx]['accuracy']['num_steps'] += len(label)
self.metrics[idx]['accuracy']['value'] = round(
100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2
)
def _iou(self, label, prediction, idx=0):
"""Calculate Intersection over Union.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
# Remove 1 channel dimension
label = label.squeeze(1)
prediction = prediction.squeeze(1)
intersection = (prediction * label).sum(2).sum(1)
union = (prediction + label).sum(2).sum(1) - intersection
# epsilon is added to avoid 0/0
epsilon = 1e-6
iou = (intersection + epsilon) / (union + epsilon)
self.metrics[idx]['iou']['sum'] += iou.sum().item()
self.metrics[idx]['iou']['num_steps'] += label.size(0)
self.metrics[idx]['iou']['value'] = round(
self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3
)
def _pred_label_diff(self, label, prediction, rel=False):
"""Calculate the difference between label and prediction.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
rel (:obj:`bool`, optional): If True, return the relative
difference. (default: False)
        Returns:
            A tuple of (difference, number of valid elements), or None if the
            label has no valid (positive) elements.
"""
# For numerical stability
valid_labels = label > 0.0001
_label = label[valid_labels]
_prediction = prediction[valid_labels]
valid_element_count = _label.size(0)
if valid_element_count > 0:
diff = torch.abs(_label - _prediction)
if rel:
diff = torch.div(diff, _label)
return diff, valid_element_count
def _rmse(self, label, prediction, idx=0):
"""Calculate Root Mean Square Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction)
rmse = 0
if diff is not None:
rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1])
self.metrics[idx]['rmse']['num_steps'] += label.size(0)
self.metrics[idx]['rmse']['sum'] += rmse * label.size(0)
self.metrics[idx]['rmse']['value'] = round(
self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3
)
def _mae(self, label, prediction, idx=0):
"""Calculate Mean Average Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction)
mae = 0
if diff is not None:
mae = torch.sum(diff[0]).item() / diff[1]
self.metrics[idx]['mae']['num_steps'] += label.size(0)
self.metrics[idx]['mae']['sum'] += mae * label.size(0)
self.metrics[idx]['mae']['value'] = round(
self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3
)
def _abs_rel(self, label, prediction, idx=0):
"""Calculate Absolute Relative Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction, rel=True)
abs_rel = 0
if diff is not None:
abs_rel = torch.sum(diff[0]).item() / diff[1]
self.metrics[idx]['abs_rel']['num_steps'] += label.size(0)
self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0)
self.metrics[idx]['abs_rel']['value'] = round(
self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3
)
def _setup_metrics(self, metrics):
"""Validate the evaluation metrics passed to the class.
Args:
metrics (:obj:`list` or :obj:`dict`): Metrics.
"""
if not isinstance(metrics[0], (list, tuple)):
metrics = [metrics]
for idx, metric_list in enumerate(metrics):
metric_dict = {}
for metric in metric_list:
metric_info = {'value': 0, 'sum': 0, 'num_steps': 0}
if metric == 'accuracy':
metric_info['func'] = self._accuracy
elif metric == 'rmse':
metric_info['func'] = self._rmse
elif metric == 'mae':
metric_info['func'] = self._mae
elif metric == 'abs_rel':
metric_info['func'] = self._abs_rel
elif metric == 'iou':
metric_info['func'] = self._iou
if 'func' in metric_info:
metric_dict[metric] = metric_info
if metric_dict:
self.metrics.append(metric_dict)
self.train_metrics.append({
x: [] for x in metric_dict.keys()
})
self.val_metrics.append({
x: [] for x in metric_dict.keys()
})
def _calculate_metrics(self, labels, predictions):
"""Update evaluation metric values.
Args:
label (:obj:`torch.Tensor` or :obj:`dict`): Ground truth.
prediction (:obj:`torch.Tensor` or :obj:`dict`): Prediction.
"""
predictions = self.activate_logits(predictions)
if not isinstance(labels, (list, tuple)):
labels = [labels]
predictions = [predictions]
for idx, (label, prediction) in enumerate(zip(labels, predictions)):
# If predictions are one-hot encoded
if label.size() != prediction.size():
prediction = prediction.argmax(dim=1, keepdim=True) * 1.0
if idx < len(self.metrics):
for metric in self.metrics[idx]:
self.metrics[idx][metric]['func'](
label, prediction, idx=idx
)
def _reset_metrics(self):
"""Reset metric params."""
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.metrics[idx][metric]['value'] = 0
self.metrics[idx][metric]['sum'] = 0
self.metrics[idx][metric]['num_steps'] = 0
def _get_pbar_values(self, loss):
"""Create progress bar description.
Args:
loss (float): Loss value.
"""
pbar_values = [('loss', round(loss, 2))]
if self.metrics and self.record_train:
for idx in range(len(self.metrics)):
for metric, info in self.metrics[idx].items():
metric_name = metric
if len(self.metrics) > 1:
metric_name = f'{idx} - {metric}'
pbar_values.append((metric_name, info['value']))
return pbar_values
def update_training_history(self, loss):
"""Update the training history.
Args:
loss (float): Loss value.
"""
self.train_losses.append(loss)
if self.record_train:
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.train_metrics[idx][metric].append(
self.metrics[idx][metric]['value']
)
def reset_history(self):
"""Reset the training history"""
self.train_losses = []
self.val_losses = []
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.train_metrics[idx][metric] = []
self.val_metrics[idx][metric] = []
self._reset_metrics()
def activate_logits(self, logits):
"""Apply activation function to the logits if needed.
After this the logits will be sent for calculation of
loss or evaluation metrics.
Args:
logits (torch.Tensor): Model output
Returns:
(*torch.Tensor*): activated logits
"""
return logits
def calculate_criterion(self, logits, targets, train=True):
"""Calculate loss.
Args:
logits (torch.Tensor): Prediction.
targets (torch.Tensor): Ground truth.
train (:obj:`bool`, optional): If True, loss is sent to the
L1 regularization function. (default: True)
Returns:
(*torch.Tensor*): loss value
"""
if self.activate_loss_logits:
logits = self.activate_logits(logits)
if train:
return l1(self.model, self.criterion(logits, targets), self.l1_factor)
return self.criterion(logits, targets)
def fetch_data(self, data):
"""Fetch data from loader and load it to GPU.
Args:
data (:obj:`tuple` or :obj:`list`): List containing inputs and targets.
Returns:
inputs and targets loaded to GPU.
"""
return data[0].to(self.device), data[1].to(self.device)
def train_batch(self, data):
"""Train the model on a batch of data.
Args:
data (:obj:`tuple` or :obj:`list`): Input and target data for the model.
Returns:
(*float*): Batch loss.
"""
inputs, targets = self.fetch_data(data)
self.optimizer.zero_grad() # Set gradients to zero before starting backpropagation
y_pred = self.model(inputs) # Predict output
loss = self.calculate_criterion(y_pred, targets, train=True) # Calculate loss
# Perform backpropagation
loss.backward()
self.optimizer.step()
if self.record_train:
self._calculate_metrics(targets, y_pred)
# One Cycle Policy for learning rate
if self.lr_schedulers['one_cycle_policy'] is not None:
self.lr_schedulers['one_cycle_policy'].step()
# Cyclic LR policy
if self.lr_schedulers['cyclic_lr'] is not None:
self.lr_schedulers['cyclic_lr'].step()
return loss.item()
def train_epoch(self, verbose=True):
"""Run an epoch of model training.
Args:
verbose (:obj:`bool`, optional): Print logs. (default: True)
"""
self.model.train()
if verbose:
pbar = ProgressBar(target=len(self.train_loader), width=8)
for batch_idx, data in enumerate(self.train_loader, 0):
# Train a batch
loss = self.train_batch(data)
# Update Progress Bar
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.update(batch_idx, values=pbar_values)
# Update training history
self.update_training_history(loss)
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.add(1, values=pbar_values)
self._reset_metrics()
def train_iterations(self, verbose=True):
"""Train model for the 'self.epochs' number of batches."""
self.model.train()
if verbose:
pbar = ProgressBar(target=self.epochs, width=8)
iterator = InfiniteDataLoader(self.train_loader)
for iteration in range(self.epochs):
# Train a batch
loss = self.train_batch(iterator.get_batch())
# Update Progress Bar
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.update(iteration, values=pbar_values)
# Update training history
self.update_training_history(loss)
if verbose:
pbar.add(1, values=pbar_values)
def evaluate(self, loader, verbose=True, log_message='Evaluation'):
"""Evaluate the model on a custom data loader.
Args:
loader (torch.utils.data.DataLoader): Data loader.
verbose (:obj:`bool`, optional): Print loss and metrics. (default: True)
log_message (str): Prefix for the logs which are printed at the end.
Returns:
loss and metric values
"""
start_time = time.time()
self.model.eval()
eval_loss = 0
with torch.no_grad():
for data in loader:
inputs, targets = self.fetch_data(data)
output = self.model(inputs) # Get trained model output
eval_loss += self.calculate_criterion(
output, targets, train=False
).item() # Sum up batch loss
self._calculate_metrics(targets, output) # Calculate evaluation metrics
eval_loss /= len(loader.dataset)
eval_metrics = deepcopy(self.metrics)
end_time = time.time()
# Time spent during validation
duration = int(end_time - start_time)
minutes = duration // 60
seconds = duration % 60
if verbose:
log = f'{log_message} (took {minutes} minutes, {seconds} seconds): Average loss: {eval_loss:.4f}'
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
log += f', {metric}: {self.metrics[idx][metric]["value"]}'
log += '\n'
print(log)
self._reset_metrics()
return eval_loss, eval_metrics
def validate(self, verbose=True):
"""Validate an epoch of model training.
Args:
verbose (:obj:`bool`, optional): Print validation loss and metrics.
(default: True)
"""
eval_loss, eval_metrics = self.evaluate(
self.val_loader, verbose=verbose, log_message='Validation set'
)
# Update validation logs
self.val_losses.append(eval_loss)
for idx in range(len(eval_metrics)):
for metric in eval_metrics[idx]:
self.val_metrics[idx][metric].append(
eval_metrics[idx][metric]['value']
)
def save_checkpoint(self, epoch=None):
"""Save model checkpoint.
Args:
epoch (:obj:`int`, optional): Current epoch number.
"""
if self.checkpoint is not None:
metric = None
if self.checkpoint.monitor == 'train_loss':
metric = self.train_losses[-1]
elif self.checkpoint.monitor == 'val_loss':
metric = self.val_losses[-1]
elif self.metrics:
if self.checkpoint.monitor.startswith('train_'):
if self.record_train:
metric = self.train_metrics[0][
self.checkpoint.monitor.split('train_')[-1]
][-1]
else:
metric = self.val_metrics[0][
self.checkpoint.monitor.split('val_')[-1]
][-1]
else:
print('Invalid metric function, can\'t save checkpoint.')
return
self.checkpoint(self.model, metric, epoch)
def write_summary(self, epoch, train):
"""Write training summary in tensorboard.
Args:
epoch (int): Current epoch number.
            train (bool): If True, the summary will be
                written for model training, else it
                will be written for model validation.
"""
if self.summary_writer is not None:
if train:
mode = 'train'
# Write Images
self.summary_writer.write_images(
self.model, self.activate_logits, f'prediction_epoch_{epoch}'
)
loss = self.train_losses[-1]
else:
mode = 'val'
loss = self.val_losses[-1]
# Write Loss
self.summary_writer.write_scalar(
f'Loss/{mode}', loss, epoch
)
if not train or self.record_train:
for idx in range(len(self.metrics)):
for metric, info in self.metrics[idx].items():
self.summary_writer.write_scalar(
f'{idx}/{metric.title()}/{mode}',
info['value'], epoch
)
def fit(self, start_epoch=1, epochs=None, reset=True, verbose=True):
"""Perform model training.
Args:
start_epoch (:obj:`int`, optional): Start epoch for training.
(default: 1)
            epochs (:obj:`int`, optional): Number of epochs/iterations to
                train the model for. If no value is given, the original
                value given during initialization of the learner will be used.
reset (:obj:`bool`, optional): Flag to indicate that training
is starting from scratch. (default: True)
verbose (:obj:`bool`, optional): Print logs. (default: True)
"""
if reset:
self.reset_history()
if epochs is not None:
self.epochs = epochs
for epoch in range(start_epoch, start_epoch + self.epochs):
if verbose:
print(f'Epoch {epoch}:')
# Train an epoch
self.train_epoch(verbose=verbose)
self.write_summary(epoch, True)
# Validate the model
if self.val_loader is not None:
self.validate(verbose=verbose)
self.write_summary(epoch, False)
# Save model checkpoint
self.save_checkpoint(epoch)
            # Call Step LR
            if self.lr_schedulers['step_lr'] is not None:
                self.lr_schedulers['step_lr'].step()
            # Call Reduce LR on Plateau
            if self.lr_schedulers['lr_plateau'] is not None:
                self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])
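# Illustrative usage sketch (not part of the original module): train a tiny
# linear classifier on random data for one epoch. The model, data and helper
# name below are invented for demonstration; any real model and loader are
# passed the same way.
def _example_learner_usage():
    import torch.nn as nn
    import torch.optim as optim
    from torch.utils.data import DataLoader, TensorDataset
    inputs, targets = torch.randn(64, 10), torch.randint(0, 2, (64,))
    loader = DataLoader(TensorDataset(inputs, targets), batch_size=16)
    model = nn.Linear(10, 2)
    learner = Learner(
        loader,
        optim.SGD(model.parameters(), lr=0.1),
        nn.CrossEntropyLoss(),
        device='cpu',
        epochs=1,
        metrics=['accuracy'],
    )
    learner.set_model(model)
    learner.fit()
    return learner.train_losses, learner.train_metrics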
| [
"torch.no_grad",
"torch.pow",
"torch.abs",
"torch.div",
"torch.sum"
] | 1.9.0 | shan18/TensorNet | c79a0c64152dbeb3499d204994772858326f668c |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
PACER: Partial And Complete Efficient Re-ranking.
See `PacerTreeSearchMixin.modify_logprobs` for a complete description.
"""
import random
import torch
import torch.nn.functional as F
from typing import Optional, Any, Dict, List
from parlai.agents.transformer.transformer import TransformerGeneratorAgent
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_generator_agent import (
TorchGeneratorAgent,
TreeSearch,
GreedySearch,
BeamSearch,
DelayedBeamSearch,
TopKSampling,
NucleusSampling,
TSType,
)
import parlai.utils.logging as logging
from parlai.utils.torch import neginf
from projects.light_whoami.agents.rpa_rerank import (
RPAReranker,
RPARerankAgent,
LongRPARerankAgent,
)
from projects.light_whoami.task.utils import extract_characters
from projects.msc.agents.long_tga import TransformerVariantAgent
class PacerAgentMixin:
"""
Override TGA to use a different tree search decoder.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
RPAReranker.add_cmdline_args(parser, partial_opt=partial_opt)
group = parser.add_argument_group('PACER Group')
group.add_argument(
'--pacer-n-tokens',
type=int,
default=10,
help='How many tokens to re-rank and consider',
)
group.add_argument(
'--pacer-frequency-ratio',
type=float,
default=0.05,
help='The frequency with which to apply PACER re-ranking.',
)
return parser
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
if not shared:
self.classifier = RPAReranker(opt)
else:
self.classifier = shared['classifier']
assert opt[
'beam_block_full_context'
], 'must set --beam-block-full-context True to use PACER'
def share(self) -> Dict[str, Any]:
shared = super().share()
shared['classifier'] = self.classifier
return shared
def _get_batch_context(self, batch):
"""
Override to always provide full context.
"""
if 'full_text_vec' not in batch:
logging.warn('Batch does not have full text vec, resorting to text vec')
return batch.text_vec
return batch.full_text_vec
def _treesearch_factory(self, device: int) -> TreeSearch:
method = self.opt.get('inference', 'greedy')
beam_size = self.opt.get('beam_size', 1)
pacer_kwargs = {
'classifier': self.classifier,
'pacer_n_tokens': self.opt['pacer_n_tokens'],
'pacer_frequency_ratio': self.opt['pacer_frequency_ratio'],
'agent': self,
}
if method == 'greedy':
return PacerGreedySearch(
beam_size,
min_length=0,
block_ngram=self.beam_block_ngram,
context_block_ngram=self.beam_context_block_ngram,
length_penalty=self.opt.get('beam_length_penalty', 0.65),
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=device,
**pacer_kwargs,
)
elif method == 'beam':
return PacerBeamSearch(
beam_size,
min_length=self.beam_min_length,
block_ngram=self.beam_block_ngram,
context_block_ngram=self.beam_context_block_ngram,
length_penalty=self.opt.get('beam_length_penalty', 0.65),
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=device,
**pacer_kwargs,
)
elif method == 'delayedbeam':
return PacerDelayedBeamSearch(
self.opt['topk'],
self.opt['beam_delay'],
beam_size,
min_length=self.beam_min_length,
block_ngram=self.beam_block_ngram,
context_block_ngram=self.beam_context_block_ngram,
length_penalty=self.opt.get('beam_length_penalty', 0.65),
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=device,
**pacer_kwargs,
)
elif method == 'topk':
return PacerTopKSampling(
self.opt['topk'],
beam_size,
min_length=self.beam_min_length,
block_ngram=self.beam_block_ngram,
context_block_ngram=self.beam_context_block_ngram,
length_penalty=self.opt.get('beam_length_penalty', 0.65),
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=device,
**pacer_kwargs,
)
elif method == 'nucleus':
return PacerNucleusSampling(
self.opt['topp'],
beam_size,
min_length=self.beam_min_length,
block_ngram=self.beam_block_ngram,
context_block_ngram=self.beam_context_block_ngram,
length_penalty=self.opt.get('beam_length_penalty', 0.65),
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=device,
**pacer_kwargs,
)
else:
raise NotImplementedError(
f'Other gen methods not available for PACER: {method}'
)
class PacerTreeSearchMixin(TreeSearch):
def __init__(self, *args, **kwargs):
self.classifier = kwargs.pop('classifier')
self.agent = kwargs.pop('agent')
self.n_toks = kwargs.pop('pacer_n_tokens')
self.frequency = kwargs.pop('pacer_frequency_ratio')
super().__init__(*args, **kwargs)
def set_batch_context(
self: TSType, batch_context_list: List[List[int]], batch_idx: int
) -> TSType:
"""
Override to save de-tokenized version of context.
"""
self.context = batch_context_list[batch_idx]
self.context_str = self.agent._v2t(self.context)
self.character = extract_characters(self.context_str)['_self_name']
return self
def select_paths(
self, logprobs: torch.Tensor, prior_scores: torch.Tensor, current_length: int
):
"""
Override select_paths to modify the logprobs according to classifier outputs.
:param logprobs:
a (beamsize x vocab) tensor of log probabilities. If this is the first
turn in the dialogue, it will be a (1 x vocab) tensor.
:param prior_scores:
a (beamsize) tensor of weights with the cumulative running
log-probability of each beam. If the first turn, it will be a (1) tensor.
:param current_length:
the current length in tokens
:return:
a (hypothesis_ids, token_id, scores) tuple, where:
- hypothesis_ids is a LongTensor of hypotheses we're extending. May have
repeats, but should always be (beamsize) long.
- token_ids is a (beamsize) LongTensor of next-token choices for
each of the hypotheses.
- scores is a (beamsize) Tensor with the updated cumulative log-probs
of each beam.
"""
logprobs = self.modify_logprobs(logprobs)
return super().select_paths(logprobs, prior_scores, current_length)
def modify_logprobs(self, logprobs: torch.Tensor) -> torch.Tensor:
"""
Modify logprobs in PACER.
The way it works:
1. With frequency r, select a token x_i+1 to re-rank.
2. Generate word probabilities for token x_i+1.
3. Examine top k words {x_j | score(x_j) \in top_k(P(x_i+1 | x_0,...,x_i))}; use classifier to predict P(a|x1, ..., x_i, x_j)
4. Rescore top k words via multiplication, re-normalize, and advance the generation.
:param logprobs:
initial token probabilities
:return modified:
return the modified log probabilities according to PACER
"""
if random.random() > self.frequency:
return logprobs
vals, inds = logprobs.topk(self.n_toks, dim=-1, sorted=False)
new_probs = logprobs.clone().fill_(neginf(logprobs.dtype))
# Construct partial hypotheses for each beam for each top K tokens
batch_hyps = [
h
for i in range(len(self.partial_hyps))
for h in [
self.agent._v2t(self.partial_hyps[i][1:] + [ind]) for ind in inds[i]
]
]
# Classify all beam outputs
predictor_outputs = self.classifier.batch_classify(
[self.context_str] * self.n_toks * logprobs.size(0), batch_hyps
)
# Extract RPA scores
log_predictor_scores = (
torch.stack(
[
F.log_softmax(pred['sorted_scores'].float(), dim=0)[
int(pred['text'] == self.character) - 1
]
for pred in predictor_outputs
]
)
.to(vals.device)
.view(vals.size())
)
# "Multiply" Probabilities (in log space...)
scores = vals + log_predictor_scores
for i in range(new_probs.size(0)):
new_probs[i, inds[i]] = scores[i]
return F.log_softmax(new_probs, dim=-1, dtype=torch.float32) # type: ignore
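# Illustrative sketch (not part of the original agent): the log-space re-ranking
# at the heart of modify_logprobs, isolated as a pure function. The name
# `_example_pacer_rescoring` is invented, and `classifier_logprobs` stands in for
# the RPA classifier scores that the real agent obtains from batch_classify.
def _example_pacer_rescoring(
    logprobs: torch.Tensor, classifier_logprobs: torch.Tensor, n_toks: int = 10
) -> torch.Tensor:
    # logprobs: (beam, vocab); classifier_logprobs: (beam, n_toks), aligned with the top-k tokens.
    vals, inds = logprobs.topk(n_toks, dim=-1, sorted=False)
    rescored = logprobs.clone().fill_(neginf(logprobs.dtype))
    scores = vals + classifier_logprobs  # "multiply" probabilities in log space
    for i in range(rescored.size(0)):
        rescored[i, inds[i]] = scores[i]
    # Renormalize over the full vocabulary; non-top-k tokens keep ~zero probability.
    return F.log_softmax(rescored, dim=-1, dtype=torch.float32)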
class PacerGreedySearch(PacerTreeSearchMixin, GreedySearch):
"""
Override Greedy to work with PACER.
"""
pass
class PacerBeamSearch(PacerTreeSearchMixin, BeamSearch):
"""
Override Beam to work with PACER.
"""
pass
class PacerDelayedBeamSearch(PacerTreeSearchMixin, DelayedBeamSearch):
"""
Override Delayed Beam Search to work with PACER.
"""
pass
class PacerTopKSampling(PacerTreeSearchMixin, TopKSampling):
"""
Override TopK Sampling to work with PACER.
"""
pass
class PacerNucleusSampling(PacerTreeSearchMixin, NucleusSampling):
"""
    Override Nucleus Sampling to work with PACER.
"""
pass
class PacerPartialOnlyAgent(PacerAgentMixin, TransformerGeneratorAgent):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
TransformerGeneratorAgent.add_cmdline_args(parser, partial_opt=partial_opt)
PacerAgentMixin.add_cmdline_args(parser, partial_opt=partial_opt)
return parser
class LongPacerPartialOnlyAgent(PacerAgentMixin, TransformerVariantAgent):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
TransformerVariantAgent.add_cmdline_args(parser, partial_opt=partial_opt)
PacerAgentMixin.add_cmdline_args(parser, partial_opt)
return parser
class PacerAgent(PacerPartialOnlyAgent, RPARerankAgent):
"""
PACER Agent: Combines Beam and Partial Re-ranking
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
RPARerankAgent.add_cmdline_args(parser, partial_opt=partial_opt)
PacerPartialOnlyAgent.add_cmdline_args(parser, partial_opt=partial_opt)
return parser
class LongPacerAgent(LongPacerPartialOnlyAgent, LongRPARerankAgent):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
LongRPARerankAgent.add_cmdline_args(parser, partial_opt=partial_opt)
LongPacerPartialOnlyAgent.add_cmdline_args(parser, partial_opt=partial_opt)
return parser
| [
"torch.nn.functional.log_softmax"
] | 1.4.0 | MMnash/ParlAI | 7429016bce901b00f9bf4b06c82687d49cd548fa |
1.10 | #encoding:utf-8
import random
import numpy as np
import matplotlib as mpl
mpl.use('Agg')# AGG(Anti-Grain Geometry engine)
import matplotlib.pyplot as plt
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torchvision
from torchvision import models,transforms
import torchvision.utils as vutils
import torch.nn.init as init
from torch.autograd import Function
import torch.nn.functional as F
import torchaudio
# Dataset class for loading three items: a wav file, a speaker id and a text (phoneme sequence)
class AudioSpeakerTextLoader(torch.utils.data.Dataset):
"""
1) 前処理によって作成されたtxtファイルに書かれたwavファイル、話者id、テキスト(音素列)の3つを読み込む
2) テキストを正規化し整数へと変換
3) wavファイルからスペクトログラムを計算
"""
def __init__(self, dataset_txtfile_path, phoneme_list):
        # dataset_txtfile_path : path to the txt file created by preprocessing
        # phoneme_list : list of phonemes used for training
self.sampling_rate = 22050
self.filter_length = 1024
self.hop_length = 256
self.win_length = 1024
self.phoneme_list = phoneme_list
        # Map each phoneme to an index; precomputing the mapping speeds up batch creation
self.phoneme2index = {p : i for i, p in enumerate(self.phoneme_list, 0)}
        ### Read the txt file created by preprocessing ###
        # Each line is written in the format
        # path_to_wav_file|speaker_id|phoneme_sequence
with open(dataset_txtfile_path, "r") as f:
self.wavfilepath_speakerid_text = [line.split("|") for line in f.readlines()]
        # Shuffle the lines randomly
random.seed(1234)
random.shuffle(self.wavfilepath_speakerid_text)
def get_audio_text_speaker_pair(self, audiopath_sid_text):
# separate filename, speaker_id and text
audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
wav, spec = self.get_audio(audiopath)
text = self.get_text(text)
sid = self.get_sid(sid)
return (wav, spec, text, sid)
def get_audio(self, wavfile_path):
        # Load the wav file
wav, _ = torchaudio.load(wavfile_path)
        # Compute the spectrogram from the wav
        # Save the result to a file so that subsequent runs only need to load it
spec_filename = wavfile_path.replace(".wav", ".spec.pt")
if os.path.exists(spec_filename):
spec = torch.load(spec_filename)
else:
pad_size = int((self.filter_length-self.hop_length)/2)
wav_padded = torch.nn.functional.pad(wav, (pad_size, pad_size), mode='reflect')
spec = torchaudio.functional.spectrogram(
waveform=wav_padded,
                pad=0,  # torch.nn.functional.pad inside torchaudio.functional.spectrogram uses mode='constant'; padding is done manually above because mode='reflect' is wanted here
window=torch.hann_window(self.win_length),
n_fft=self.filter_length,
hop_length=self.hop_length,
win_length=self.win_length,
power=2,
normalized=False,
center=False
)
spec = torch.squeeze(spec, 0)
torch.save(spec, spec_filename)
return wav, spec
def get_sid(self, sid):
sid = torch.LongTensor([int(sid)])
return sid
def get_text(self, text):
#Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
text_splitted = text.replace("\n", "").split(",")
        text_converted_into_index = [self.phoneme2index[p] for p in text_splitted]  # convert phonemes to integers
        # Insert a 0 between each pair of phonemes
text_norm = [0] * (len(text_converted_into_index) * 2 + 1)
text_norm[1::2] = text_converted_into_index
        # Convert to a tensor
text_norm = torch.LongTensor(text_norm)
return text_norm
def __getitem__(self, index):
line = self.wavfilepath_speakerid_text[index]
wavfilepath, speakerid, text = line[0], line[1], line[2]
wav, spec = self.get_audio(wavfilepath)
speaker_id = self.get_sid(speakerid)
text = self.get_text(text)
return (wav, spec, speaker_id, text)
def __len__(self):
return len(self.wavfilepath_speakerid_text)
# Collate function that packs the items returned by AudioSpeakerTextLoader.__getitem__ into a batch
def collate_fn(batch):
# batch = [
# (wav, spec, speaker_id, text),
# (wav, spec, speaker_id, text),
# ....
# ]
    max_wav_len = max([x[0].size(1) for x in batch])  # longest wav in the batch
    max_spec_len = max([x[1].size(1) for x in batch])  # longest spectrogram in the batch
    max_text_len = max([x[3].size(0) for x in batch])  # longest text in the batch
batch_size = len(batch)
wav_lengths = torch.LongTensor(batch_size)#torch.size([batch_size])
spec_lengths = torch.LongTensor(batch_size)
speaker_id = torch.LongTensor(batch_size)
text_lengths = torch.LongTensor(batch_size)
wav_padded = torch.zeros(batch_size, 1, max_wav_len, dtype=torch.float32)
spec_padded = torch.zeros(batch_size, batch[0][1].size(0), max_spec_len, dtype=torch.float32)
text_padded = torch.zeros(batch_size, max_text_len, dtype=torch.long)
    # text_padded, spec_padded and wav_padded are initialized with zeros; overwriting them
    # left-aligned with the original text, spec and wav yields zero-padded tensors
for i, (wav_row, spec_row, speaker_id_row, text_row) in enumerate(batch, 0):
wav_padded[i, :, :wav_row.size(1)] = wav_row
wav_lengths[i] = wav_row.size(1)
spec_padded[i, :, :spec_row.size(1)] = spec_row
spec_lengths[i] = spec_row.size(1)
speaker_id[i] = speaker_id_row
text_padded[i, :text_row.size(0)] = text_row
text_lengths[i] = text_row.size(0)
return wav_padded, wav_lengths, \
spec_padded, spec_lengths, \
speaker_id, \
text_padded, text_lengths
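# Illustrative sketch (not part of the original file): run collate_fn on two dummy
# samples of different lengths to see the zero padding. The helper name and the
# toy sizes are invented; 513 channels corresponds to filter_length // 2 + 1.
def _example_collate():
    def make_item(wav_len, spec_len, text_len):
        return (
            torch.randn(1, wav_len),
            torch.randn(513, spec_len),
            torch.LongTensor([0]),
            torch.randint(1, 40, (text_len,)),
        )
    batch = [make_item(2000, 8, 5), make_item(3000, 12, 9)]
    wav, wav_lens, spec, spec_lens, sid, text, text_lens = collate_fn(batch)
    # wav: (2, 1, 3000), spec: (2, 513, 12), text: (2, 9); shorter items are padded with zeros on the right.
    return wav.shape, spec.shape, text.shape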
# For each tensor in the batch, extract the span of length segment_size starting at start_indices[i]
# Used during training to crop spectrograms and waveforms to a given length along the time axis
def slice_segments(input_tensor, start_indices, segment_size):
output_tensor = torch.zeros_like(input_tensor[:, ..., :segment_size])
batch_size = input_tensor.size(0)
for batch_index in range(batch_size):
index_start = start_indices[batch_index]
index_end = index_start + segment_size
output_tensor[batch_index] = input_tensor[batch_index, ..., index_start:index_end]
return output_tensor
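# Illustrative sketch (not part of the original file): crop a fixed-length window
# from each item in a batch, as done when sampling training segments. The helper
# name and the toy tensor are invented for demonstration.
def _example_slice_segments():
    spec = torch.arange(2 * 3 * 10, dtype=torch.float32).view(2, 3, 10)  # (batch, channels, frames)
    start_indices = torch.tensor([0, 4])
    segments = slice_segments(spec, start_indices, segment_size=4)
    # segments has shape (2, 3, 4): item 0 keeps frames 0..3, item 1 keeps frames 4..7.
    return segments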
| [
"torch.zeros",
"torch.hann_window",
"torch.save",
"torch.squeeze",
"torch.LongTensor",
"torch.load",
"torch.zeros_like",
"torch.nn.functional.pad"
] | 1.10.1 | ishine/VITS-1 | 6b76bab881c801322ee3a8d8815ec06dd1c80980 |
1.7 | import warnings
import torch
import torch.nn as nn
from torch.tensor import Tensor
from typing import Optional, Tuple, Union
def hopfield_core_forward(query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Optional[Tensor]
in_proj_bias, # type: Optional[Tensor]
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
static_k=None, # type: Optional[Tensor]
static_v=None, # type: Optional[Tensor]
key_as_static=False, # type: bool
query_as_static=False, # type: bool
value_as_static=False, # type: bool
value_as_connected=False, # type: bool
normalize_pattern=False, # type: bool
p_norm_weight=None, # type: Optional[Tensor]
p_norm_bias=None, # type: Optional[Tensor]
head_dim=None, # type: Optional[int]
pattern_dim=None, # type: Optional[int]
scaling=None, # type: Optional[Union[float, Tensor]]
update_steps_max=0, # type: Optional[Union[int, Tensor]]
update_steps_eps=1e-4, # type: Union[float, Tensor]
return_raw_associations=False, # type: bool
return_projected_patterns=False # type: bool
):
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
See "Hopfield Networks is All You Need" for more details in the setting of Hopfield networks.
embed_dim_to_check: total dimension of the model (in case of default head dimension).
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
key_as_static: interpret specified key as being static.
query_as_static: interpret specified key as being static.
value_as_static: interpret specified key as being static.
value_as_connected: connect value projection with key projection.
normalize_pattern: enable normalization of patterns.
p_norm_weight, p_norm_bias: pattern normalization weight and bias.
head_dim: dimensionality of each head.
pattern_dim: dimensionality of each projected value input.
scaling: scaling of association heads, often represented as beta (one entry per head).
update_steps_max: maximum count of association update steps (None equals to infinity).
update_steps_eps: minimum difference threshold between two consecutive association update steps.
return_raw_associations: return raw association (softmax) values, unmodified.
return_projected_patterns: return pattern projection values, unmodified.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, head_dim)`, where S is the source sequence length, N is the batch size.
- static_v: :math:`(N*num_heads, S, head_dim)`, where S is the source sequence length, N is the batch size.
- scaling: :math:`(num_heads,)`, where num_heads is the amount of heads.
Outputs:
- attn_output: :math:`(L, N, E)`, where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)`, where N is the batch size,
L is the target sequence length, S is the source sequence length.
        - attn_raw: :math:`(N, num_heads, L, S)`, where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and nn.functional.has_torch_function(tens_ops):
return nn.functional.handle_torch_function(
hopfield_core_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v,
key_as_static=key_as_static, query_as_static=query_as_static,
value_as_static=value_as_static, value_as_connected=value_as_connected,
normalize_pattern=normalize_pattern, p_norm_weight=p_norm_weight, p_norm_bias=p_norm_bias,
head_dim=head_dim, pattern_dim=pattern_dim, scaling=scaling, update_steps_max=update_steps_max,
update_steps_eps=update_steps_eps, return_raw_associations=return_raw_associations)
tgt_len, bsz, embed_dim = query.shape[0], value.shape[1], query.shape[2]
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
assert (scaling is None) or (type(scaling) in (float, torch.Tensor))
if type(scaling) == torch.Tensor:
assert scaling.ndimension() == 1 and scaling.shape[0] == num_heads, "only one entry per head."
assert (update_steps_max is None) or (type(update_steps_max) in (int, torch.Tensor))
if type(update_steps_max) == torch.Tensor:
assert update_steps_max.ndimension() == 1 and update_steps_max.shape[0] == num_heads, "only one entry per head."
elif type(update_steps_max) == int:
update_steps_max = torch.tensor([update_steps_max] * num_heads, dtype=torch.int32, device=query.device)
elif update_steps_max is None:
update_steps_max = -torch.ones(size=(num_heads,), dtype=torch.int32, device=query.device)
assert type(update_steps_eps) in (float, torch.Tensor)
if type(update_steps_eps) == torch.Tensor:
assert update_steps_eps.ndimension() == 1 and update_steps_eps.shape[0] == num_heads, "only one entry per head."
assert (update_steps_eps <= 0.0).sum() == 0, "only positive thresholds allowed."
update_steps_eps = update_steps_eps.to(device=query.device)
elif type(update_steps_eps) == float:
assert update_steps_eps > 0, "only positive thresholds allowed."
update_steps_eps = torch.tensor([update_steps_eps] * num_heads, dtype=query.dtype, device=query.device)
    # Adapt dimensionality of each head.
if head_dim is None:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, r'embed_dim must be divisible by num_heads.'
hopfield_dim = num_heads * head_dim
# Adapt dimensionality of each value projection.
if pattern_dim is None:
pattern_dim = head_dim
assert (not value_as_connected) or (pattern_dim == head_dim)
q, k, v, xi, src_len = None, None, None, None, 0
update_step, xi_old, xi_difference_norm = 0, None, float(r'+inf')
update_active_heads = torch.tensor([[[True]]] * num_heads * bsz, device=query.device)
assert update_active_heads.any(), "at least one head needs to be active."
####################################################################################################################
# BEGIN HOPFIELD UPDATE ITERATION #
####################################################################################################################
while update_active_heads.any():
# The query is already projected into the "Hopfield" space at "update_step" equals 0.
# No more projection necessary if "update_step" greater than 0.
if update_step == 0:
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value) and not (
key_as_static or query_as_static or value_as_static):
# self-attention
q, k, v = nn.functional.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value) and not (key_as_static or value_as_static):
# encoder-decoder attention
_start, _end = 0, hopfield_dim
if query_as_static:
q = query.repeat(1, num_heads, 1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = nn.functional.linear(query, _w, _b)
_start = hopfield_dim
_end = None
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1)
else:
_start, _end = 0, hopfield_dim
if query_as_static:
q = query.repeat(1, num_heads, 1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = nn.functional.linear(query, _w, _b)
_start += hopfield_dim
_end += hopfield_dim
if key_as_static:
k = key.repeat(1, num_heads, 1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = nn.functional.linear(key, _w, _b)
_start += hopfield_dim
_end += hopfield_dim
if value_as_static:
v = value.repeat(1, num_heads, 1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
v = nn.functional.linear(value, _w, _b)
else:
_start, _end = 0, hopfield_dim
if query_as_static:
q = query.repeat(1, num_heads, 1)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == hopfield_dim and len2 == query.size(-1)
if in_proj_bias is not None:
q = nn.functional.linear(query, q_proj_weight_non_opt, in_proj_bias[_start:_end])
_start += hopfield_dim
_end += hopfield_dim
else:
q = nn.functional.linear(query, q_proj_weight_non_opt, in_proj_bias)
v = value
if key_as_static:
k = key.repeat(1, num_heads, 1)
else:
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == hopfield_dim and len2 == key.size(-1)
_bias = None if in_proj_bias is None else in_proj_bias[_start:_end]
k = nn.functional.linear(key, k_proj_weight_non_opt, _bias)
if value_as_connected:
v = nn.functional.linear(v, k_proj_weight_non_opt, _bias)
_start += hopfield_dim
_end += num_heads * pattern_dim
if value_as_static:
if not (value_as_connected or key_as_static):
v = v.repeat(1, num_heads, 1)
else:
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == (num_heads * pattern_dim) and len2 == v.size(-1)
if in_proj_bias is not None:
v = nn.functional.linear(v, v_proj_weight_non_opt, in_proj_bias[_start:])
else:
v = nn.functional.linear(v, v_proj_weight_non_opt, in_proj_bias)
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or \
attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for attn_mask in nn.HopfieldCore is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
# print(attn_mask.size(), [1, query.size(0), key.size(0)])
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# Optionally normalize patterns.
if normalize_pattern:
q = torch.nn.functional.layer_norm(
input=q.reshape(shape=(-1, head_dim)), normalized_shape=(head_dim,),
weight=p_norm_weight, bias=p_norm_bias).reshape(shape=q.shape)
k = torch.nn.functional.layer_norm(
input=k.reshape(shape=(-1, head_dim)), normalized_shape=(head_dim,),
weight=p_norm_weight, bias=p_norm_bias).reshape(shape=k.shape)
else:
active_xi = xi.masked_select(mask=update_active_heads).view(size=(-1, *xi.shape[1:]))
active_k = k.masked_select(mask=update_active_heads).view(size=(-1, *k.shape[1:]))
q = torch.masked_scatter(input=q, mask=update_active_heads, source=torch.bmm(active_xi, active_k))
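            # (added note) For update steps > 0, the still-active heads re-project the
            # query as xi @ K: the association pattern retrieved in the previous
            # iteration becomes the new state pattern of the Hopfield update.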
# Optionally scale association heads (each head separately).
if type(scaling) == float:
q = q * scaling
elif type(scaling) == torch.Tensor:
q = q * scaling.view(1, 1, -1).repeat(repeats=(1, 1, q.shape[2] // scaling.shape[0]))
if update_step == 0:
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.HopfieldCore is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None and key_as_static is None and value_as_static is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = nn.functional.pad(attn_mask, [0, 1])
if key_padding_mask is not None:
key_padding_mask = nn.functional.pad(key_padding_mask, [0, 1])
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
assert not key_as_static, "bias cannot be added to static key."
assert not value_as_static, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, -1, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(v.shape[0], bsz * num_heads, -1).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == pattern_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = nn.functional.pad(attn_mask, [0, 1])
if key_padding_mask is not None:
key_padding_mask = nn.functional.pad(key_padding_mask, [0, 1])
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
# Compute new xi for Hopfield retrieve iterations.
if xi is None:
xi = nn.functional.softmax(attn_output_weights, dim=-1)
else:
xi = torch.masked_scatter(input=xi, mask=update_active_heads, source=nn.functional.softmax(
attn_output_weights.masked_select(mask=update_active_heads).view(size=(-1, *xi.shape[1:])), dim=-1))
# Compute threshold-based stopping criterion for Hopfield retrieve iterations.
with torch.no_grad():
xi_active = xi.view(size=(bsz, num_heads, tgt_len, src_len))
update_active_heads = (update_step < update_steps_max) | (update_steps_max < 0)
if xi_old is not None:
update_active_heads &= ((xi_old - xi_active).norm(p=2, dim=(2, 3)).max(axis=0)[0]) > update_steps_eps
update_active_heads = update_active_heads.unsqueeze(dim=1).unsqueeze(dim=2).repeat(repeats=(bsz, 1, 1))
xi_old = xi_active
update_step += 1
####################################################################################################################
# END HOPFIELD UPDATE ITERATION #
####################################################################################################################
attn_output_weights = nn.functional.dropout(xi, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.shape[:2]) == [bsz * num_heads, tgt_len]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)
if out_proj_weight is not None:
assert attn_output.shape[2] == num_heads * pattern_dim
attn_output = nn.functional.linear(attn_output, out_proj_weight, out_proj_bias)
xi = xi.view(bsz, num_heads, tgt_len, src_len) if return_raw_associations else None
v = v.view(bsz, num_heads, src_len, -1) if return_projected_patterns else None
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads, xi, v
else:
return attn_output, None, xi, v
| [
"torch.jit._unwrap_optional",
"torch.nn.functional.dropout",
"torch.no_grad",
"torch.bmm",
"torch.ones",
"torch.nn.functional.linear",
"torch.jit.is_scripting",
"torch.tensor",
"torch.nn.functional.handle_torch_function",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.equal",
"torch.nn.functional.has_torch_function"
] | 1.7.1 | shalei120/HopfieldLM | 3fba4ee05bfc7f5041593f95457ffdf0bdc094a3 |
1.10 | import torchaudio
import torch
class LJSpeechDataset(torchaudio.datasets.LJSPEECH):
def __init__(self, root: str):
super().__init__(root=root)
def __getitem__(self, index: int):
waveform, sr, _, transcript = super().__getitem__(index)
waveform_length = torch.tensor([waveform.shape[-1]]).int()
return transcript, waveform, waveform_length
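# Minimal usage sketch (added; not part of the original file). The dataset root below
# is an assumption and must already contain the extracted LJSpeech-1.1 corpus, since
# torchaudio's LJSPEECH is not asked to download it here.
if __name__ == "__main__":
    dataset = LJSpeechDataset(root="./data")
    transcript, waveform, waveform_length = dataset[0]
    print(transcript[:50], waveform.shape, waveform_length)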
| [
"torch.tensor"
] | 1.10.0 | khaykingleb/HiFi-GAN | 6bafd6f8f67d2393e057cb64cd6c1311d59a85f0 |
0.4 | """
SqueezeNext for ImageNet-1K, implemented in PyTorch.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import ConvBlock, conv1x1_block, conv7x7_block
class SqnxtUnit(nn.Module):
"""
SqueezeNext unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
stride):
super(SqnxtUnit, self).__init__()
if stride == 2:
reduction_den = 1
self.resize_identity = True
elif in_channels > out_channels:
reduction_den = 4
self.resize_identity = True
else:
reduction_den = 2
self.resize_identity = False
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=(in_channels // reduction_den),
stride=stride,
bias=True)
self.conv2 = conv1x1_block(
in_channels=(in_channels // reduction_den),
out_channels=(in_channels // (2 * reduction_den)),
bias=True)
self.conv3 = ConvBlock(
in_channels=(in_channels // (2 * reduction_den)),
out_channels=(in_channels // reduction_den),
kernel_size=(1, 3),
stride=1,
padding=(0, 1),
bias=True)
self.conv4 = ConvBlock(
in_channels=(in_channels // reduction_den),
out_channels=(in_channels // reduction_den),
kernel_size=(3, 1),
stride=1,
padding=(1, 0),
bias=True)
self.conv5 = conv1x1_block(
in_channels=(in_channels // reduction_den),
out_channels=out_channels,
bias=True)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=True)
self.activ = nn.ReLU(inplace=True)
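    # Forward pass note (added): the block follows the SqueezeNext bottleneck pattern,
    # 1x1 reduce -> 1x1 reduce -> 1x3 conv -> 3x1 conv -> 1x1 expand, with the input
    # added back through `identity` (optionally resized by a 1x1 convolution).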
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = x + identity
x = self.activ(x)
return x
class SqnxtInitBlock(nn.Module):
"""
SqueezeNext specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(SqnxtInitBlock, self).__init__()
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2,
padding=1,
bias=True)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class SqueezeNext(nn.Module):
"""
SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(SqueezeNext, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", SqnxtInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), SqnxtUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module('final_block', conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bias=True))
in_channels = final_block_channels
self.features.add_module('final_pool', nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_squeezenext(version,
width_scale,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create SqueezeNext model with specific parameters.
Parameters:
----------
version : str
        Version of SqueezeNext ('23' or '23v5').
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
init_block_channels = 64
final_block_channels = 128
channels_per_layers = [32, 64, 128, 256]
if version == '23':
layers = [6, 6, 8, 1]
elif version == '23v5':
layers = [2, 4, 14, 1]
else:
raise ValueError("Unsupported SqueezeNet version {}".format(version))
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
init_block_channels = int(init_block_channels * width_scale)
final_block_channels = int(final_block_channels * width_scale)
net = SqueezeNext(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sqnxt23_w1(**kwargs):
"""
1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs)
def sqnxt23_w3d2(**kwargs):
"""
1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs)
def sqnxt23_w2(**kwargs):
"""
2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs)
def sqnxt23v5_w1(**kwargs):
"""
1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs)
def sqnxt23v5_w3d2(**kwargs):
"""
1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs)
def sqnxt23v5_w2(**kwargs):
"""
2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
sqnxt23_w1,
sqnxt23_w3d2,
sqnxt23_w2,
sqnxt23v5_w1,
sqnxt23v5_w3d2,
sqnxt23v5_w2,
]
for model in models:
net = model(pretrained=pretrained)
# net.eval()
net.train()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sqnxt23_w1 or weight_count == 724056)
assert (model != sqnxt23_w3d2 or weight_count == 1511824)
assert (model != sqnxt23_w2 or weight_count == 2583752)
assert (model != sqnxt23v5_w1 or weight_count == 921816)
assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)
assert (model != sqnxt23v5_w2 or weight_count == 3366344)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.randn"
] | 0.4.0 | yick2232/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 |
0.4 | """
WRN for ImageNet-1K, implemented in PyTorch.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['WRN', 'wrn50_2']
import os
import torch.nn as nn
import torch.nn.init as init
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
activate):
super(WRNConv, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
def wrn_conv1x1(in_channels,
out_channels,
stride,
activate):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
activate=activate)
def wrn_conv3x3(in_channels,
out_channels,
stride,
activate):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
activate=activate)
class WRNBottleneck(nn.Module):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
"""
def __init__(self,
in_channels,
out_channels,
stride,
width_factor):
super(WRNBottleneck, self).__init__()
mid_channels = int(round(out_channels // 4 * width_factor))
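        # Worked example (added): for out_channels=256 and width_factor=2.0 this gives
        # mid_channels=128, i.e. twice as wide as the 64-channel bottleneck of a
        # standard ResNet-50 stage.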
self.conv1 = wrn_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
stride=1,
activate=True)
self.conv2 = wrn_conv3x3(
in_channels=mid_channels,
out_channels=mid_channels,
stride=stride,
activate=True)
self.conv3 = wrn_conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
stride=1,
activate=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class WRNUnit(nn.Module):
"""
WRN unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
"""
def __init__(self,
in_channels,
out_channels,
stride,
width_factor):
super(WRNUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
self.body = WRNBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
width_factor=width_factor)
if self.resize_identity:
self.identity_conv = wrn_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activate=False)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class WRNInitBlock(nn.Module):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(WRNInitBlock, self).__init__()
self.conv = WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=2,
padding=3,
activate=True)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class WRN(nn.Module):
"""
WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
width_factor : float
Wide scale factor for width of layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
width_factor,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(WRN, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", WRNInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), WRNUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
width_factor=width_factor))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module('final_pool', nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_wrn(blocks,
width_factor,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create WRN model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width_factor : float
Wide scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
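    # Worked example (added): blocks=50 selects layers=[3, 4, 6, 3], so channels becomes
    # [[256]*3, [512]*4, [1024]*6, [2048]*3].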
net = WRN(
channels=channels,
init_block_channels=init_block_channels,
width_factor=width_factor,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def wrn50_2(**kwargs):
"""
WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_wrn(blocks=50, width_factor=2.0, model_name="wrn50_2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
wrn50_2,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != wrn50_2 or weight_count == 68849128)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.randn"
] | 0.4.0 | yick2232/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 |
1.7 | import logging
import warnings
from typing import Optional, Union
import numpy as np
import torch
from anndata import AnnData
from scvi import _CONSTANTS
from scvi.model.base import (
BaseModelClass,
RNASeqMixin,
UnsupervisedTrainingMixin,
VAEMixin,
)
from scvi.module import VAEC
logger = logging.getLogger(__name__)
class CondSCVI(RNASeqMixin, VAEMixin, UnsupervisedTrainingMixin, BaseModelClass):
"""
Conditional version of single-cell Variational Inference, used for hierarchical deconvolution of spatial transcriptomics data.
Parameters
----------
adata
AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
n_hidden
Number of nodes per hidden layer.
n_latent
Dimensionality of the latent space.
n_layers
Number of hidden layers used for encoder and decoder NNs.
dropout_rate
Dropout rate for the encoder neural networks.
weight_obs
Whether to reweight observations by their inverse proportion (useful for lowly abundant cell types)
**module_kwargs
Keyword args for :class:`~scvi.modules.VAEC`
Examples
--------
>>> adata = anndata.read_h5ad(path_to_anndata)
>>> scvi.data.setup_anndata(adata, batch_key="batch")
>>> vae = scvi.external.CondSCVI(adata)
>>> vae.train()
>>> adata.obsm["X_CondSCVI"] = vae.get_latent_representation()
"""
def __init__(
self,
adata: AnnData,
n_hidden: int = 128,
n_latent: int = 5,
n_layers: int = 2,
dropout_rate: float = 0.1,
weight_obs: bool = False,
**module_kwargs,
):
super(CondSCVI, self).__init__(adata)
n_labels = self.summary_stats["n_labels"]
n_vars = self.summary_stats["n_vars"]
if weight_obs:
ct_counts = adata.obs["_scvi_labels"].value_counts()[range(n_labels)].values
ct_prop = ct_counts / np.sum(ct_counts)
ct_prop[ct_prop < 0.05] = 0.05
ct_prop = ct_prop / np.sum(ct_prop)
ct_weight = 1.0 / ct_prop
module_kwargs.update({"ct_weight": ct_weight})
self.module = VAEC(
n_input=n_vars,
n_labels=n_labels,
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
dropout_rate=dropout_rate,
**module_kwargs,
)
self._model_summary_string = (
"Conditional SCVI Model with the following params: \nn_hidden: {}, n_latent: {}, n_layers: {}, dropout_rate: {}, weight_obs: {}"
).format(n_hidden, n_latent, n_layers, dropout_rate, weight_obs)
self.init_params_ = self._get_init_params(locals())
@torch.no_grad()
def get_vamp_prior(
self,
adata: Optional[AnnData] = None,
p: int = 50,
) -> np.ndarray:
r"""
Return an empirical prior over the cell-type specific latent space (vamp prior) that may be used for deconvolution.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
p
number of components in the mixture model underlying the empirical prior
Returns
-------
mean_vprior: np.ndarray
(n_labels, p, D) array
var_vprior
            (n_labels, p, D) array
"""
if self.is_trained_ is False:
warnings.warn(
"Trying to query inferred values from an untrained model. Please train the model first."
)
adata = self._validate_anndata(adata)
mean_vprior = np.zeros(
(self.summary_stats["n_labels"], p, self.module.n_latent)
)
var_vprior = np.zeros((self.summary_stats["n_labels"], p, self.module.n_latent))
key = self.scvi_setup_dict_["categorical_mappings"]["_scvi_labels"][
"original_key"
]
mapping = self.scvi_setup_dict_["categorical_mappings"]["_scvi_labels"][
"mapping"
]
for ct in range(self.summary_stats["n_labels"]):
# pick p cells
local_indices = np.random.choice(
np.where(adata.obs[key] == mapping[ct])[0], p
)
# get mean and variance from posterior
scdl = self._make_data_loader(
adata=adata, indices=local_indices, batch_size=p
)
mean = []
var = []
for tensors in scdl:
x = tensors[_CONSTANTS.X_KEY]
y = tensors[_CONSTANTS.LABELS_KEY]
out = self.module.inference(x, y)
mean_, var_ = out["qz_m"], out["qz_v"]
mean += [mean_.cpu()]
var += [var_.cpu()]
mean_vprior[ct], var_vprior[ct] = np.array(torch.cat(mean)), np.array(
torch.cat(var)
)
return mean_vprior, var_vprior
def train(
self,
max_epochs: int = 400,
lr: float = 0.001,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 1,
validation_size: Optional[float] = None,
batch_size: int = 128,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using MAP inference.
Parameters
----------
max_epochs
Number of epochs to train for
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
plan_kwargs
Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
update_dict = {
"lr": lr,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
super().train(
max_epochs=max_epochs,
use_gpu=use_gpu,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
plan_kwargs=plan_kwargs,
**kwargs,
)
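# Usage sketch (added; not part of the original module). Assumes an AnnData object
# `adata` that has been registered via scvi.data.setup_anndata with a labels key:
# >>> model = CondSCVI(adata, n_latent=5)
# >>> model.train(max_epochs=400)
# >>> mean_vprior, var_vprior = model.get_vamp_prior(adata, p=50)
# >>> mean_vprior.shape  # (n_labels, 50, 5)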
| [
"torch.cat",
"torch.no_grad"
] | 1.7.1 | morris-frank/scvi-tools | b828c75455bdd9e9558882d0b110ed97ba135184 |
0.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.anchors import Anchors
class SiamMask(nn.Module):
def __init__(self, anchors=None, o_sz=127, g_sz=127):
super(SiamMask, self).__init__()
self.anchors = anchors # anchor_cfg
self.anchor_num = len(self.anchors["ratios"]) * len(self.anchors["scales"])
self.anchor = Anchors(anchors)
self.features = None
self.rpn_model = None
self.mask_model = None
self.o_sz = o_sz
self.g_sz = g_sz
self.upSample = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])
self.all_anchors = None
def set_all_anchors(self, image_center, size):
# cx,cy,w,h
if not self.anchor.generate_all_anchors(image_center, size):
return
all_anchors = self.anchor.all_anchors[1] # cx, cy, w, h
self.all_anchors = torch.from_numpy(all_anchors).float().cuda()
self.all_anchors = [self.all_anchors[i] for i in range(4)]
def feature_extractor(self, x):
return self.features(x)
def rpn(self, template, search):
pred_cls, pred_loc = self.rpn_model(template, search)
return pred_cls, pred_loc
def mask(self, template, search):
pred_mask = self.mask_model(template, search)
return pred_mask
    def _add_rpn_loss(self, label_cls, label_loc, label_loc_weight, label_mask, label_mask_weight,
rpn_pred_cls, rpn_pred_loc, rpn_pred_mask):
rpn_loss_cls = select_cross_entropy_loss(rpn_pred_cls, label_cls)
        rpn_loss_loc = weight_l1_loss(rpn_pred_loc, label_loc, label_loc_weight)
rpn_loss_mask, iou_m, iou_5, iou_7 = select_mask_logistic_loss(rpn_pred_mask, label_mask, label_mask_weight)
return rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, iou_m, iou_5, iou_7
def run(self, template, search, softmax=False):
"""
run network
"""
template_feature = self.feature_extractor(template)
feature, search_feature = self.features.forward_all(search)
rpn_pred_cls, rpn_pred_loc = self.rpn(template_feature, search_feature)
corr_feature = self.mask_model.mask.forward_corr(template_feature, search_feature) # (b, 256, w, h)
rpn_pred_mask = self.refine_model(feature, corr_feature)
if softmax:
rpn_pred_cls = self.softmax(rpn_pred_cls)
return rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature
def softmax(self, cls):
b, a2, h, w = cls.size()
cls = cls.view(b, 2, a2//2, h, w)
cls = cls.permute(0, 2, 3, 4, 1).contiguous()
cls = F.log_softmax(cls, dim=4)
return cls
def forward(self, input, softmax):
"""
:param input: dict of input with keys of:
'template': [b, 3, h1, w1], input template image.
'search': [b, 3, h2, w2], input search image.
            'label_cls': [b, max_num_gts, 5] or None (when self.training is False),
each gt contains x1,y1,x2,y2,class.
:return: dict of loss, predict, accuracy
"""
template = input['template']
search = input['search']
if self.training:
label_cls = input['label_cls']
label_loc = input['label_loc']
            label_loc_weight = input['label_loc_weight']
label_mask = input['label_mask']
label_mask_weight = input['label_mask_weight']
rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature = \
self.run(template, search, softmax=softmax)
outputs = dict()
outputs['predict'] = [rpn_pred_loc, rpn_pred_cls, rpn_pred_mask, template_feature, search_feature]
if self.training:
rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, iou_acc_mean, iou_acc_5, iou_acc_7 = \
                self._add_rpn_loss(label_cls, label_loc, label_loc_weight, label_mask, label_mask_weight,
rpn_pred_cls, rpn_pred_loc, rpn_pred_mask)
outputs['losses'] = [rpn_loss_cls, rpn_loss_loc, rpn_loss_mask]
outputs['accuracy'] = [iou_acc_mean, iou_acc_5, iou_acc_7]
return outputs
def template(self, z):
self.zf = self.feature_extractor(z)
cls_kernel, loc_kernel = self.rpn_model.template(self.zf)
return cls_kernel, loc_kernel
def track(self, x, cls_kernel=None, loc_kernel=None, softmax=False):
xf = self.feature_extractor(x)
rpn_pred_cls, rpn_pred_loc = self.rpn_model.track(xf, cls_kernel, loc_kernel)
if softmax:
rpn_pred_cls = self.softmax(rpn_pred_cls)
return rpn_pred_cls, rpn_pred_loc
def get_cls_loss(pred, label, select):
if select.nelement() == 0: return pred.sum()*0.
pred = torch.index_select(pred, 0, select)
label = torch.index_select(label, 0, select)
return F.nll_loss(pred, label)
def select_cross_entropy_loss(pred, label):
pred = pred.view(-1, 2)
label = label.view(-1)
pos = Variable(label.data.eq(1).nonzero().squeeze()).cuda()
neg = Variable(label.data.eq(0).nonzero().squeeze()).cuda()
loss_pos = get_cls_loss(pred, label, pos)
loss_neg = get_cls_loss(pred, label, neg)
return loss_pos * 0.5 + loss_neg * 0.5
def weight_l1_loss(pred_loc, label_loc, loss_weight):
"""
:param pred_loc: [b, 4k, h, w]
:param label_loc: [b, 4k, h, w]
:param loss_weight: [b, k, h, w]
:return: loc loss value
"""
b, _, sh, sw = pred_loc.size()
pred_loc = pred_loc.view(b, 4, -1, sh, sw)
diff = (pred_loc - label_loc).abs()
diff = diff.sum(dim=1).view(b, -1, sh, sw)
loss = diff * loss_weight
return loss.sum().div(b)
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
weight = weight.view(-1)
pos = Variable(weight.data.eq(1).nonzero().squeeze())
if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0
if len(p_m.shape) == 4:
p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
p_m = torch.index_select(p_m, 0, pos)
p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
p_m = p_m.view(-1, g_sz * g_sz)
else:
p_m = torch.index_select(p_m, 0, pos)
mask_uf = F.unfold(mask, (g_sz, g_sz), padding=0, stride=8)
mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)
mask_uf = torch.index_select(mask_uf, 0, pos)
loss = F.soft_margin_loss(p_m, mask_uf)
iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
return loss, iou_m, iou_5, iou_7
def iou_measure(pred, label):
pred = pred.ge(0)
mask_sum = pred.eq(1).add(label.eq(1))
intxn = torch.sum(mask_sum == 2, dim=1).float()
union = torch.sum(mask_sum > 0, dim=1).float()
iou = intxn/union
return torch.mean(iou), (torch.sum(iou > 0.5).float()/iou.shape[0]), (torch.sum(iou > 0.7).float()/iou.shape[0])
if __name__ == "__main__":
p_m = torch.randn(4, 63*63, 25, 25)
cls = torch.randn(4, 1, 25, 25) > 0.9
mask = torch.randn(4, 1, 255, 255) * 2 - 1
loss = select_mask_logistic_loss(p_m, mask, cls)
print(loss)
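    # Added sanity check (illustrative only): iou_measure on synthetic predictions and
    # binary labels; the shapes below are assumptions, not taken from the tracker config.
    pred_masks = torch.randn(4, 127 * 127)
    label_masks = (torch.randn(4, 127 * 127) > 0).float()
    print(iou_measure(pred_masks, label_masks))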
| [
"torch.nn.functional.nll_loss",
"torch.nn.functional.unfold",
"torch.nn.UpsamplingBilinear2d",
"torch.transpose",
"torch.nn.functional.log_softmax",
"torch.from_numpy",
"torch.mean",
"torch.nn.functional.soft_margin_loss",
"torch.index_select",
"torch.randn",
"torch.sum"
] | 0.4.1 | weihaosky/CycleSiam | 9d11f6cb236a6699185774e49ebafe8d2f867ebe |
1.6 | """
Copyright (c) 2017 Matterport, Inc.
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import datetime
import math
import os
import random
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import utils
from nms.nms_wrapper import nms
from roialign.roi_align.crop_and_resize import CropAndResizeFunction
import cv2
from models.modules import *
from utils import *
############################################################
# Pytorch Utility Functions
############################################################
def unique1d(tensor):
if tensor.size()[0] == 0 or tensor.size()[0] == 1:
return tensor
tensor = tensor.sort()[0]
unique_bool = tensor[1:] != tensor [:-1]
first_element = Variable(torch.ByteTensor([True]), requires_grad=False)
if tensor.is_cuda:
first_element = first_element.cuda()
unique_bool = torch.cat((first_element, unique_bool),dim=0)
return tensor[unique_bool.data]
def intersect1d(tensor1, tensor2):
aux = torch.cat((tensor1, tensor2),dim=0)
aux = aux.sort()[0]
return aux[:-1][(aux[1:] == aux[:-1]).data]
def log2(x):
"""Implementatin of Log2. Pytorch doesn't have a native implemenation."""
ln2 = Variable(torch.log(torch.FloatTensor([2.0])), requires_grad=False)
if x.is_cuda:
ln2 = ln2.cuda()
return torch.log(x) / ln2
class SamePad2d(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2d, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
def forward(self, input):
in_width = input.size()[2]
in_height = input.size()[3]
out_width = math.ceil(float(in_width) / float(self.stride[0]))
out_height = math.ceil(float(in_height) / float(self.stride[1]))
pad_along_width = ((out_width - 1) * self.stride[0] +
self.kernel_size[0] - in_width)
pad_along_height = ((out_height - 1) * self.stride[1] +
self.kernel_size[1] - in_height)
pad_left = math.floor(pad_along_width / 2)
pad_top = math.floor(pad_along_height / 2)
pad_right = pad_along_width - pad_left
pad_bottom = pad_along_height - pad_top
return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)
def __repr__(self):
return self.__class__.__name__
############################################################
# FPN Graph
############################################################
class FPN(nn.Module):
def __init__(self, C1, C2, C3, C4, C5, out_channels, bilinear_upsampling=False):
super(FPN, self).__init__()
self.out_channels = out_channels
self.bilinear_upsampling = bilinear_upsampling
self.C1 = C1
self.C2 = C2
self.C3 = C3
self.C4 = C4
self.C5 = C5
self.P6 = nn.MaxPool2d(kernel_size=1, stride=2)
self.P5_conv1 = nn.Conv2d(2048, self.out_channels, kernel_size=1, stride=1)
self.P5_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P4_conv1 = nn.Conv2d(1024, self.out_channels, kernel_size=1, stride=1)
self.P4_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P3_conv1 = nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1)
self.P3_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P2_conv1 = nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1)
self.P2_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
def forward(self, x):
#print("x",x.size())
x = self.C1(x)
x = self.C2(x)
c2_out = x
#print("c2_out",c2_out.size())
x = self.C3(x)
c3_out = x
#print("c3_out",c3_out.size())
x = self.C4(x)
c4_out = x
#print("c4_out",c4_out.size())
x = self.C5(x)
p5_out = self.P5_conv1(x)
#print("p5_out",p5_out.size())
if self.bilinear_upsampling:
p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2, mode='bilinear')
p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2, mode='bilinear')
p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2, mode='bilinear')
else:
p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2)
p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2)
p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2)
pass
p5_out = self.P5_conv2(p5_out)
p4_out = self.P4_conv2(p4_out)
p3_out = self.P3_conv2(p3_out)
p2_out = self.P2_conv2(p2_out)
## P6 is used for the 5th anchor scale in RPN. Generated by
## subsampling from P5 with stride of 2.
p6_out = self.P6(p5_out)
# print(p6_out[0].size())
# print(p5_out[0].size())
# print(p4_out[0].size())
# print(p3_out[0].size())
# print(p2_out[0].size())
# print("done with p's")
return [p2_out, p3_out, p4_out, p5_out, p6_out]
############################################################
# Resnet Graph
############################################################
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)
self.bn1 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)
self.padding2 = SamePad2d(kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3)
self.bn2 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1)
self.bn3 = nn.BatchNorm2d(planes * 4, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.padding2(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, architecture, stage5=False, numInputChannels=3):
super(ResNet, self).__init__()
assert architecture in ["resnet50", "resnet101"]
self.inplanes = 64
self.layers = [3, 4, {"resnet50": 6, "resnet101": 23}[architecture], 3]
self.block = Bottleneck
self.stage5 = stage5
self.C1 = nn.Sequential(
nn.Conv2d(numInputChannels, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True),
SamePad2d(kernel_size=3, stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.C2 = self.make_layer(self.block, 64, self.layers[0])
self.C3 = self.make_layer(self.block, 128, self.layers[1], stride=2)
self.C4 = self.make_layer(self.block, 256, self.layers[2], stride=2)
if self.stage5:
self.C5 = self.make_layer(self.block, 512, self.layers[3], stride=2)
else:
self.C5 = None
def forward(self, x):
x = self.C1(x)
x = self.C2(x)
x = self.C3(x)
x = self.C4(x)
x = self.C5(x)
return x
def stages(self):
return [self.C1, self.C2, self.C3, self.C4, self.C5]
def make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride),
nn.BatchNorm2d(planes * block.expansion, eps=0.001, momentum=0.01),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
############################################################
# Proposal Layer
############################################################
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
## Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
## Apply deltas
center_y = center_y + deltas[:, 0] * height
center_x = center_x + deltas[:, 1] * width
height = height * torch.exp(deltas[:, 2])
width = width * torch.exp(deltas[:, 3])
## Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = torch.stack([y1, x1, y2, x2], dim=1)
return result
def clip_boxes(boxes, window):
"""
boxes: [N, 4] each col is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
boxes = torch.stack( \
[boxes[:, 0].clamp(float(window[0]), float(window[2])),
boxes[:, 1].clamp(float(window[1]), float(window[3])),
boxes[:, 2].clamp(float(window[0]), float(window[2])),
boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)
return boxes
def proposal_layer(inputs, proposal_count, nms_threshold, anchors, config=None):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
#print("proposal count", proposal_count)
## Currently only supports batchsize 1
inputs[0] = inputs[0].squeeze(0)
inputs[1] = inputs[1].squeeze(0)
## Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, 1]
#print('scores',scores.size())
## Box deltas [batch, num_rois, 4]
deltas = inputs[1]
#print('deltas',deltas.size())
std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)
if config.GPU_COUNT:
std_dev = std_dev.cuda()
deltas = deltas * std_dev
#print('deltas2', deltas.size())
## Improve performance by trimming to top anchors by score
## and doing the rest on the smaller subset.
#
# print("anchor size", anchors.size())
# print("anchor size", len(anchors))
pre_nms_limit = min(6000, anchors.size()[0])
scores, order = scores.sort(descending=True)
order = order[:pre_nms_limit]
scores = scores[:pre_nms_limit]
deltas = deltas[order.data, :]
anchors = anchors[order.data, :]
#print(anchors.size())
## Apply deltas to anchors to get refined anchors.
## [batch, N, (y1, x1, y2, x2)]
boxes = apply_box_deltas(anchors, deltas)
#print('boxes', boxes.size())
## Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = clip_boxes(boxes, window)
#print('boxes2', boxes.size())
## Filter out small boxes
## According to Xinlei Chen's paper, this reduces detection accuracy
## for small objects, so we're skipping it.
## Non-max suppression
keep = nms(torch.cat((boxes, scores.unsqueeze(1)), 1).data, nms_threshold)
#print('keep0', keep.size())
keep = keep[:proposal_count]
#print('keep', keep.size())
boxes = boxes[keep, :]
#print('boxes3', boxes.size())
## Normalize dimensions to range of 0 to 1.
norm = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)
if config.GPU_COUNT:
norm = norm.cuda()
normalized_boxes = boxes / norm
#print('norm_boxes', normalized_boxes.size())
## Add back batch dimension
normalized_boxes = normalized_boxes.unsqueeze(0)
#print(normalized_boxes.size())
return normalized_boxes
############################################################
# ROIAlign Layer
############################################################
def pyramid_roi_align(inputs, pool_size, image_shape):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_size: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, channels, height, width]
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
# print("inputs",len(inputs))
# print("inputs0", len(inputs[0]))
# print("inputs00", inputs[0].size())
# print("inputs1", len(inputs[1]))
## Currently only supports batchsize 1
for i in range(len(inputs)):
inputs[i] = inputs[i].squeeze(0)
## Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# print('boxes', len(boxes), boxes.size())
## Feature Maps. List of feature maps from different level of the
## feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# print('feature_maps', len(feature_maps), feature_maps[0].size())
## Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
h = y2 - y1
w = x2 - x1
## Equation 1 in the Feature Pyramid Networks paper. Account for
## the fact that our coordinates are normalized here.
## e.g. a 224x224 ROI (in pixels) maps to P4
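    ## (added) e.g. with a 1024x1024 input, a full-image ROI (h=w=1 in normalized
    ## coordinates) gives roi_level = 4 + log2(1024/224) ~ 6.2, clamped down to P5.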
image_area = Variable(torch.FloatTensor([float(image_shape[0]*image_shape[1])]), requires_grad=False)
if boxes.is_cuda:
image_area = image_area.cuda()
roi_level = 4 + log2(torch.sqrt(h*w)/(224.0/torch.sqrt(image_area)))
roi_level = roi_level.round().int()
roi_level = roi_level.clamp(2, 5)
## Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = roi_level==level
if not ix.any():
continue
ix = torch.nonzero(ix)[:,0]
level_boxes = boxes[ix.data, :]
## Keep track of which box is mapped to which level
box_to_level.append(ix.data)
        ## Stop gradient propagation to ROI proposals
level_boxes = level_boxes.detach()
## Crop and Resize
## From Mask R-CNN paper: "We sample four regular locations, so
## that we can evaluate either max or average pooling. In fact,
## interpolating only a single value at each bin center (without
## pooling) is nearly as effective."
#
## Here we use the simplified approach of a single value per bin,
## which is how it's done in tf.crop_and_resize()
## Result: [batch * num_boxes, pool_height, pool_width, channels]
ind = Variable(torch.zeros(level_boxes.size()[0]),requires_grad=False).int()
if level_boxes.is_cuda:
ind = ind.cuda()
feature_maps[i] = feature_maps[i].unsqueeze(0) #CropAndResizeFunction needs batch dimension
pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(feature_maps[i], level_boxes, ind)
pooled.append(pooled_features)
## Pack pooled features into one tensor
pooled = torch.cat(pooled, dim=0)
## Pack box_to_level mapping into one array and add another
## column representing the order of pooled boxes
box_to_level = torch.cat(box_to_level, dim=0)
## Rearrange pooled features to match the order of the original boxes
_, box_to_level = torch.sort(box_to_level)
pooled = pooled[box_to_level, :, :]
return pooled
def coordinates_roi(inputs, pool_size, image_shape):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_size: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates.
    - Coordinates map: [batch, channels, height, width] tensor from which
      the boxes are cropped.
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
# print('inputs', type(inputs), len(inputs))
# print(inputs[0].size())
# print(inputs[1].size())
## Currently only supports batchsize 1
for i in range(len(inputs)):
inputs[i] = inputs[i].squeeze(0)
## Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# print('boxes', boxes.size())
## Feature Maps. List of feature maps from different level of the
## feature pyramid. Each is [batch, height, width, channels]
    coordinates = inputs[1]
    # print("cordi", coordinates.size())
## Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
h = y2 - y1
w = x2 - x1
## Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
    ## Stop gradient propagation to ROI proposals
boxes = boxes.detach()
ind = Variable(torch.zeros(boxes.size()[0]),requires_grad=False).int()
if boxes.is_cuda:
ind = ind.cuda()
    coordinates = coordinates.unsqueeze(0) ## CropAndResizeFunction needs batch dimension
    pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(coordinates, boxes, ind)
return pooled_features
############################################################
## Detection Target Layer
############################################################
def bbox_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    ## 1. Tile boxes2 and repeat boxes1. This allows us to compare
## every boxes1 against every boxes2 without loops.
    ## TF doesn't have an equivalent to np.repeat() so simulate it
## using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)
boxes2 = boxes2.repeat(boxes2_repeat,1)
## 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)
## 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area[:,0] + b2_area[:,0] - intersection
## 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = iou.view(boxes2_repeat, boxes1_repeat)
return overlaps
def detection_target_layer(proposals, gt_class_ids, gt_boxes, gt_masks, gt_parameters, config):
"""Subsamples proposals and generates target box refinment, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
"""
## Currently only supports batchsize 1
proposals = proposals.squeeze(0)
gt_class_ids = gt_class_ids.squeeze(0)
gt_boxes = gt_boxes.squeeze(0)
gt_masks = gt_masks.squeeze(0)
gt_parameters = gt_parameters.squeeze(0)
no_crowd_bool = Variable(torch.ByteTensor(proposals.size()[0]*[True]), requires_grad=False)
if config.GPU_COUNT:
no_crowd_bool = no_crowd_bool.cuda()
## Compute overlaps matrix [proposals, gt_boxes]
overlaps = bbox_overlaps(proposals, gt_boxes)
## Determine positive and negative ROIs
roi_iou_max = torch.max(overlaps, dim=1)[0]
## 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = roi_iou_max >= 0.5
#print('positive count', positive_roi_bool.sum())
## Subsample ROIs. Aim for 33% positive
## Positive ROIs
if positive_roi_bool.sum() > 0:
positive_indices = torch.nonzero(positive_roi_bool)[:, 0]
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
rand_idx = torch.randperm(positive_indices.size()[0])
rand_idx = rand_idx[:positive_count]
if config.GPU_COUNT:
rand_idx = rand_idx.cuda()
positive_indices = positive_indices[rand_idx]
positive_count = positive_indices.size()[0]
positive_rois = proposals[positive_indices.data,:]
## Assign positive ROIs to GT boxes.
positive_overlaps = overlaps[positive_indices.data,:]
roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
roi_gt_boxes = gt_boxes[roi_gt_box_assignment.data,:]
roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment.data]
roi_gt_parameters = gt_parameters[roi_gt_box_assignment.data]
## Compute bbox refinement for positive ROIs
deltas = Variable(utils.box_refinement(positive_rois.data, roi_gt_boxes.data), requires_grad=False)
std_dev = Variable(torch.from_numpy(config.BBOX_STD_DEV).float(), requires_grad=False)
if config.GPU_COUNT:
std_dev = std_dev.cuda()
deltas /= std_dev
## Assign positive ROIs to GT masks
roi_masks = gt_masks[roi_gt_box_assignment.data]
## Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
## Transform ROI coordinates from normalized image space
## to normalized mini-mask space.
y1, x1, y2, x2 = positive_rois.chunk(4, dim=1)
gt_y1, gt_x1, gt_y2, gt_x2 = roi_gt_boxes.chunk(4, dim=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = torch.cat([y1, x1, y2, x2], dim=1)
box_ids = Variable(torch.arange(roi_masks.size()[0]), requires_grad=False).int()
if config.GPU_COUNT:
box_ids = box_ids.cuda()
if config.NUM_PARAMETER_CHANNELS > 0:
masks = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks[:, :, :, 0].contiguous().unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1)
masks = torch.round(masks)
parameters = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks[:, :, :, 1].contiguous().unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1)
masks = torch.stack([masks, parameters], dim=-1)
else:
masks = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks.unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1)
masks = torch.round(masks)
pass
## Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
## binary cross entropy loss.
else:
positive_count = 0
## 2. Negative ROIs are those with < 0.5 IoU with every GT box. Skip crowds.
negative_roi_bool = roi_iou_max < 0.5
negative_roi_bool = negative_roi_bool & no_crowd_bool
## Negative ROIs. Add enough to maintain positive:negative ratio.
if (negative_roi_bool > 0).sum() > 0 and positive_count>0:
negative_indices = torch.nonzero(negative_roi_bool)[:, 0]
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = int(r * positive_count - positive_count)
rand_idx = torch.randperm(negative_indices.size()[0])
rand_idx = rand_idx[:negative_count]
if config.GPU_COUNT:
rand_idx = rand_idx.cuda()
negative_indices = negative_indices[rand_idx]
negative_count = negative_indices.size()[0]
negative_rois = proposals[negative_indices.data, :]
else:
negative_count = 0
#print('count', positive_count, negative_count)
#print(roi_gt_class_ids)
## Append negative ROIs and pad bbox deltas and masks that
## are not used for negative ROIs with zeros.
if positive_count > 0 and negative_count > 0:
rois = torch.cat((positive_rois, negative_rois), dim=0)
zeros = Variable(torch.zeros(negative_count), requires_grad=False).int()
if config.GPU_COUNT:
zeros = zeros.cuda()
roi_gt_class_ids = torch.cat([roi_gt_class_ids, zeros], dim=0)
zeros = Variable(torch.zeros(negative_count, 4), requires_grad=False)
if config.GPU_COUNT:
zeros = zeros.cuda()
deltas = torch.cat([deltas, zeros], dim=0)
if config.NUM_PARAMETER_CHANNELS > 0:
zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1], 2), requires_grad=False)
else:
zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)
pass
if config.GPU_COUNT:
zeros = zeros.cuda()
masks = torch.cat([masks, zeros], dim=0)
zeros = Variable(torch.zeros(negative_count, config.NUM_PARAMETERS), requires_grad=False)
if config.GPU_COUNT:
zeros = zeros.cuda()
roi_gt_parameters = torch.cat([roi_gt_parameters, zeros], dim=0)
elif positive_count > 0:
rois = positive_rois
elif negative_count > 0:
rois = negative_rois
zeros = Variable(torch.zeros(negative_count), requires_grad=False)
if config.GPU_COUNT:
zeros = zeros.cuda()
roi_gt_class_ids = zeros
zeros = Variable(torch.zeros(negative_count, 4), requires_grad=False).int()
if config.GPU_COUNT:
zeros = zeros.cuda()
deltas = zeros
zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)
if config.GPU_COUNT:
zeros = zeros.cuda()
masks = zeros
zeros = Variable(torch.zeros(negative_count, config.NUM_PARAMETERS), requires_grad=False)
if config.GPU_COUNT:
zeros = zeros.cuda()
roi_gt_parameters = torch.cat([roi_gt_parameters, zeros], dim=0)
else:
rois = Variable(torch.FloatTensor(), requires_grad=False)
roi_gt_class_ids = Variable(torch.IntTensor(), requires_grad=False)
deltas = Variable(torch.FloatTensor(), requires_grad=False)
masks = Variable(torch.FloatTensor(), requires_grad=False)
roi_gt_parameters = Variable(torch.FloatTensor(), requires_grad=False)
if config.GPU_COUNT:
rois = rois.cuda()
roi_gt_class_ids = roi_gt_class_ids.cuda()
deltas = deltas.cuda()
masks = masks.cuda()
roi_gt_parameters = roi_gt_parameters.cuda()
pass
return rois, roi_gt_class_ids, deltas, masks, roi_gt_parameters
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes = torch.stack([boxes[:, 0].clamp(float(window[0]), float(window[2])), boxes[:, 1].clamp(float(window[1]), float(window[3])), boxes[:, 2].clamp(float(window[0]), float(window[2])), boxes[:, 3].clamp(float(window[1]), float(window[3]))], dim=-1)
return boxes
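## Illustrative example: with window = (0, 0, 480, 640), a box [-10, 20, 500, 700]
## is clamped to [0, 20, 480, 640].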
def refine_detections(rois, probs, deltas, parameters, window, config, return_indices=False, use_nms=1, one_hot=True):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)]
"""
## Class IDs per ROI
if len(probs.shape) == 1:
class_ids = probs.long()
else:
_, class_ids = torch.max(probs, dim=1)
pass
## Class probability of the top class of each ROI
## Class-specific bounding box deltas
idx = torch.arange(class_ids.size()[0]).long()
if config.GPU_COUNT:
idx = idx.cuda()
if len(probs.shape) == 1:
class_scores = torch.ones(class_ids.shape)
deltas_specific = deltas
class_parameters = parameters
if config.GPU_COUNT:
class_scores = class_scores.cuda()
else:
class_scores = probs[idx, class_ids.data]
deltas_specific = deltas[idx, class_ids.data]
class_parameters = parameters[idx, class_ids.data]
## Apply bounding box deltas
## Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)
if config.GPU_COUNT:
std_dev = std_dev.cuda()
refined_rois = apply_box_deltas(rois, deltas_specific * std_dev)
## Convert coordinates to image domain
height, width = config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)
if config.GPU_COUNT:
scale = scale.cuda()
refined_rois = refined_rois * scale
## Clip boxes to image window
refined_rois = clip_to_window(window, refined_rois)
## Round and cast to int since we're dealing with pixels now
refined_rois = torch.round(refined_rois)
## TODO: Filter out boxes with zero area
## Filter out background boxes
keep_bool = class_ids > 0
## Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE and False:
keep_bool = keep_bool & (class_scores >= config.DETECTION_MIN_CONFIDENCE)
keep_bool = keep_bool & (refined_rois[:, 2] > refined_rois[:, 0]) & (refined_rois[:, 3] > refined_rois[:, 1])
if keep_bool.sum() == 0:
if return_indices:
return torch.zeros((0, 10)).cuda(), torch.zeros(0).long().cuda(), torch.zeros((0, 4)).cuda()
else:
return torch.zeros((0, 10)).cuda()
pass
keep = torch.nonzero(keep_bool)[:,0]
if use_nms == 2:
## Apply per-class NMS
pre_nms_class_ids = class_ids[keep.data]
pre_nms_scores = class_scores[keep.data]
pre_nms_rois = refined_rois[keep.data]
ixs = torch.arange(len(pre_nms_class_ids)).long().cuda()
## Sort
ix_rois = pre_nms_rois
ix_scores = pre_nms_scores
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order.data,:]
nms_keep = nms(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1).data, config.DETECTION_NMS_THRESHOLD)
nms_keep = keep[ixs[order[nms_keep].data].data]
keep = intersect1d(keep, nms_keep)
elif use_nms == 1:
## Apply per-class NMS
pre_nms_class_ids = class_ids[keep.data]
pre_nms_scores = class_scores[keep.data]
pre_nms_rois = refined_rois[keep.data]
for i, class_id in enumerate(unique1d(pre_nms_class_ids)):
## Pick detections of this class
ixs = torch.nonzero(pre_nms_class_ids == class_id)[:,0]
## Sort
ix_rois = pre_nms_rois[ixs.data]
ix_scores = pre_nms_scores[ixs]
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order.data,:]
class_keep = nms(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1).data, config.DETECTION_NMS_THRESHOLD)
## Map indices
class_keep = keep[ixs[order[class_keep].data].data]
if i==0:
nms_keep = class_keep
else:
nms_keep = unique1d(torch.cat((nms_keep, class_keep)))
keep = intersect1d(keep, nms_keep)
else:
pass
## Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
top_ids = class_scores[keep.data].sort(descending=True)[1][:roi_count]
keep = keep[top_ids.data]
#print('num detections', len(keep))
### Apply plane anchors
class_parameters = config.applyAnchorsTensor(class_ids, class_parameters)
## Arrange output as [N, (y1, x1, y2, x2, class_id, score, parameters)]
## Coordinates are in image domain.
result = torch.cat((refined_rois[keep.data],
class_ids[keep.data].unsqueeze(1).float(),
class_scores[keep.data].unsqueeze(1),
class_parameters[keep.data]), dim=1)
if return_indices:
ori_rois = rois * scale
ori_rois = clip_to_window(window, ori_rois)
ori_rois = torch.round(ori_rois)
ori_rois = ori_rois[keep.data]
return result, keep.data, ori_rois
return result
def detection_layer(config, rois, mrcnn_class, mrcnn_bbox, mrcnn_parameter, image_meta, return_indices=False, use_nms=1, one_hot=True):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
"""
## Currently only supports batchsize 1
rois = rois.squeeze(0)
_, _, window, _ = parse_image_meta(image_meta)
window = window[0]
if len(mrcnn_class) == 0:
if return_indices:
return torch.zeros(0), torch.zeros(0), torch.zeros(0)
else:
return torch.zeros(0)
return refine_detections(rois, mrcnn_class, mrcnn_bbox, mrcnn_parameter, window, config, return_indices=return_indices, use_nms=use_nms, one_hot=one_hot)
############################################################
# Region Proposal Network
############################################################
class RPN(nn.Module):
"""Builds the model of Region Proposal Network.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
def __init__(self, anchors_per_location, anchor_stride, depth):
super(RPN, self).__init__()
self.anchors_per_location = anchors_per_location
self.anchor_stride = anchor_stride
self.depth = depth
self.padding = SamePad2d(kernel_size=3, stride=self.anchor_stride)
self.conv_shared = nn.Conv2d(self.depth, 512, kernel_size=3, stride=self.anchor_stride)
self.relu = nn.ReLU(inplace=True)
self.conv_class = nn.Conv2d(512, 2 * anchors_per_location, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=2)
self.conv_bbox = nn.Conv2d(512, 4 * anchors_per_location, kernel_size=1, stride=1)
def forward(self, x):
## Shared convolutional base of the RPN
x = self.relu(self.conv_shared(self.padding(x)))
## Anchor Score. [batch, anchors per location * 2, height, width].
rpn_class_logits = self.conv_class(x)
## Reshape to [batch, 2, anchors]
rpn_class_logits = rpn_class_logits.permute(0,2,3,1)
rpn_class_logits = rpn_class_logits.contiguous()
rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)
## Softmax on last dimension of BG/FG.
rpn_probs = self.softmax(rpn_class_logits)
## Bounding box refinement. [batch, H, W, anchors per location, depth]
## where depth is [x, y, log(w), log(h)]
rpn_bbox = self.conv_bbox(x)
## Reshape to [batch, 4, anchors]
rpn_bbox = rpn_bbox.permute(0,2,3,1)
rpn_bbox = rpn_bbox.contiguous()
rpn_bbox = rpn_bbox.view(x.size()[0], -1, 4)
return [rpn_class_logits, rpn_probs, rpn_bbox]
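## Illustrative shapes: with anchors_per_location=3, anchor_stride=1 and depth=256,
## a feature map of shape [1, 256, 32, 32] yields rpn_class_logits and rpn_probs of
## shape [1, 3072, 2] (32*32*3 anchors) and rpn_bbox of shape [1, 3072, 4].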
############################################################
# Feature Pyramid Network Heads
############################################################
class Classifier(nn.Module):
def __init__(self, depth, pool_size, image_shape, num_classes, num_parameters, debug=False):
super(Classifier, self).__init__()
self.depth = depth
self.pool_size = pool_size
self.image_shape = image_shape
self.num_classes = num_classes
self.num_parameters = num_parameters
self.conv1 = nn.Conv2d(self.depth + 64, 1024, kernel_size=self.pool_size, stride=1)
self.bn1 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)
self.conv2 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1)
self.bn2 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.linear_class = nn.Linear(1024, num_classes)
self.softmax = nn.Softmax(dim=1)
self.linear_bbox = nn.Linear(1024, num_classes * 4)
self.debug = debug
if self.debug:
self.linear_parameters = nn.Linear(3, num_classes * self.num_parameters)
else:
self.linear_parameters = nn.Linear(1024, num_classes * self.num_parameters)
pass
def forward(self, x, rois, ranges, pool_features=True, gt=None):
# print('rois',len(rois), rois.size())
# print('ranges_in', len(ranges), ranges.size())
x = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)
# print('x', x.size())
ranges = coordinates_roi([rois] + [ranges, ], self.pool_size, self.image_shape)
# print('ranges', ranges.size())
roi_features = torch.cat([x, ranges], dim=1)
x = self.conv1(roi_features)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = x.view(-1,1024)
mrcnn_class_logits = self.linear_class(x)
mrcnn_probs = self.softmax(mrcnn_class_logits)
mrcnn_bbox = self.linear_bbox(x)
mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, 4)
if self.debug:
x = gt
pass
mrcnn_parameters = self.linear_parameters(x)
if self.debug:
pass
mrcnn_parameters = mrcnn_parameters.view(mrcnn_parameters.size()[0], -1, self.num_parameters)
# print('param', mrcnn_parameters.size())
if pool_features:
return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox, mrcnn_parameters, roi_features]
else:
return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox, mrcnn_parameters]
class Mask(nn.Module):
def __init__(self, config, depth, pool_size, image_shape, num_classes):
super(Mask, self).__init__()
self.config = config
self.depth = depth
self.pool_size = pool_size
self.image_shape = image_shape
self.num_classes = num_classes
self.padding = SamePad2d(kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)
self.bn1 = nn.BatchNorm2d(256, eps=0.001)
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn2 = nn.BatchNorm2d(256, eps=0.001)
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn3 = nn.BatchNorm2d(256, eps=0.001)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn4 = nn.BatchNorm2d(256, eps=0.001)
self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256, num_classes + config.NUM_PARAMETER_CHANNELS, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x, rois, pool_features=True):
if pool_features:
roi_features = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)
# print('roi_features', len(roi_features), roi_features[0].size(), roi_features[1].size())
else:
roi_features = x
pass
x = self.conv1(self.padding(roi_features))
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(self.padding(x))
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(self.padding(x))
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(self.padding(x))
x = self.bn4(x)
x = self.relu(x)
x = self.deconv(x)
x = self.relu(x)
x = self.conv5(x)
if self.config.NUM_PARAMETER_CHANNELS > 0 and not self.config.OCCLUSION:
x = torch.cat([self.sigmoid(x[:, :-self.config.NUM_PARAMETER_CHANNELS]), x[:, -self.config.NUM_PARAMETER_CHANNELS:]], dim=1)
else:
# print('x_maks',len(x),x[0].size())
x = self.sigmoid(x)
pass
return x, roi_features
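## Illustrative shapes: with pool_size=14 and depth=256, N ROIs give roi_features of
## shape [N, 256, 14, 14] and, after the transposed convolution, a mask output of
## shape [N, num_classes + NUM_PARAMETER_CHANNELS, 28, 28].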
class Depth(nn.Module):
def __init__(self, num_output_channels=1):
super(Depth, self).__init__()
self.num_output_channels = num_output_channels
self.conv1 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.conv3 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.conv4 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.conv5 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.deconv1 = nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.deconv2 = nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.deconv3 = nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.deconv4 = nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.deconv5 = nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(256, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True)
)
self.depth_pred = nn.Conv2d(64, num_output_channels, kernel_size=3, stride=1, padding=1)
self.crop = True
return
def forward(self, feature_maps):
if self.crop:
padding = 5
# print(feature_maps[0].size())
# print(feature_maps[1].size())
# print(feature_maps[2].size())
# print(feature_maps[3].size())
for c in range(2, 5):
feature_maps[c] = feature_maps[c][:, :, padding * pow(2, c - 2):-padding * pow(2, c - 2)]
continue
pass
x = self.deconv1(self.conv1(feature_maps[0]))
x = self.deconv2(torch.cat([self.conv2(feature_maps[1]), x], dim=1))
if self.crop:
x = x[:, :, 5:35]
x = self.deconv3(torch.cat([self.conv3(feature_maps[2]), x], dim=1))
x = self.deconv4(torch.cat([self.conv4(feature_maps[3]), x], dim=1))
x = self.deconv5(torch.cat([self.conv5(feature_maps[4]), x], dim=1))
x = self.depth_pred(x)
if self.crop:
x = torch.nn.functional.interpolate(x, size=(480, 640), mode='bilinear')
zeros = torch.zeros((len(x), self.num_output_channels, 80, 640)).cuda()
x = torch.cat([zeros, x, zeros], dim=2)
else:
x = torch.nn.functional.interpolate(x, size=(640, 640), mode='bilinear')
pass
return x
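## Note: with self.crop enabled the decoder output is bilinearly resized to 480x640 and
## zero-padded with 80 rows at the top and bottom, so the returned depth map is 640x640.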
############################################################
# Loss Functions
############################################################
def compute_rpn_class_loss(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
## Squeeze last dim to simplify
rpn_match = rpn_match.squeeze(2)
## Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = (rpn_match == 1).long()
## Positive and Negative anchors contribute to the loss,
## but neutral anchors (match value = 0) don't.
indices = torch.nonzero(rpn_match != 0)
## Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = rpn_class_logits[indices.data[:,0],indices.data[:,1],:]
anchor_class = anchor_class[indices.data[:,0],indices.data[:,1]]
## Crossentropy loss
loss = F.cross_entropy(rpn_class_logits, anchor_class)
return loss
def compute_rpn_bbox_loss(target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
## Squeeze last dim to simplify
rpn_match = rpn_match.squeeze(2)
## Positive anchors contribute to the loss, but negative and
## neutral anchors (match value of 0 or -1) don't.
indices = torch.nonzero(rpn_match==1)
## Pick bbox deltas that contribute to the loss
rpn_bbox = rpn_bbox[indices.data[:,0],indices.data[:,1]]
## Trim target bounding box deltas to the same length as rpn_bbox.
target_bbox = target_bbox[0,:rpn_bbox.size()[0],:]
## Smooth L1 loss
loss = F.smooth_l1_loss(rpn_bbox, target_bbox)
return loss
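## For reference, F.smooth_l1_loss computes the Huber-style loss
## 0.5 * d^2 if |d| < 1, and |d| - 0.5 otherwise (d = prediction - target),
## averaged over all selected elements by default.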
def compute_mrcnn_class_loss(target_class_ids, pred_class_logits):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
"""
## Loss
if len(target_class_ids) > 0:
loss = F.cross_entropy(pred_class_logits,target_class_ids.long())
else:
loss = Variable(torch.FloatTensor([0]), requires_grad=False)
if target_class_ids.is_cuda:
loss = loss.cuda()
return loss
def compute_mrcnn_bbox_loss(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
if (target_class_ids > 0).sum() > 0:
## Only positive ROIs contribute to the loss. And only
## the right class_id of each ROI. Get their indices.
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long()
indices = torch.stack((positive_roi_ix,positive_roi_class_ids), dim=1)
## Gather the deltas (predicted and true) that contribute to loss
target_bbox = target_bbox[indices[:,0].data,:]
pred_bbox = pred_bbox[indices[:,0].data,indices[:,1].data,:]
## Smooth L1 loss
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = Variable(torch.FloatTensor([0]), requires_grad=False)
if target_class_ids.is_cuda:
loss = loss.cuda()
return loss
def compute_mrcnn_mask_loss(config, target_masks, target_class_ids, target_parameters, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
if (target_class_ids > 0).sum() > 0:
## Only positive ROIs contribute to the loss. And only
## the class specific mask of each ROI.
positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_class_ids = target_class_ids[positive_ix.data].long()
indices = torch.stack((positive_ix, positive_class_ids), dim=1)
## Gather the masks (predicted and true) that contribute to loss
y_true = target_masks[indices[:,0].data,:,:]
if config.GLOBAL_MASK:
y_pred = pred_masks[indices[:,0],0,:,:]
else:
y_pred = pred_masks[indices[:,0].data,indices[:,1].data,:,:]
pass
if config.NUM_PARAMETER_CHANNELS == 1:
if config.OCCLUSION:
visible_pred = pred_masks[indices[:,0],-1,:,:]
visible_gt = y_true[:, :, :, -1]
y_true = y_true[:, :, :, 0]
loss = F.binary_cross_entropy(y_pred, y_true) + F.binary_cross_entropy(visible_pred, visible_gt)
else:
depth_pred = pred_masks[indices[:,0],-1,:,:]
depth_gt = y_true[:, :, :, -1]
y_true = y_true[:, :, :, 0]
loss = F.binary_cross_entropy(y_pred, y_true) + l1LossMask(depth_pred, depth_gt, (depth_gt > 1e-4).float())
pass
elif config.NUM_PARAMETER_CHANNELS == 4:
depth_pred = pred_masks[indices[:,0],-config.NUM_PARAMETER_CHANNELS,:,:]
depth_gt = y_true[:, :, :, -1]
y_true = y_true[:, :, :, 0]
normal_pred = pred_masks[indices[:,0],-(config.NUM_PARAMETER_CHANNELS - 1):,:,:]
normal_gt = target_parameters[indices[:,0]]
normal_gt = normal_gt / torch.clamp(torch.norm(normal_gt, dim=-1, keepdim=True), min=1e-4)
loss = F.binary_cross_entropy(y_pred, y_true) + l1LossMask(depth_pred, depth_gt, (depth_gt > 1e-4).float()) + l2NormLossMask(normal_pred, normal_gt.unsqueeze(-1).unsqueeze(-1), y_true, dim=1)
else:
## Binary cross entropy
loss = F.binary_cross_entropy(y_pred, y_true)
pass
else:
loss = Variable(torch.FloatTensor([0]), requires_grad=False)
if target_class_ids.is_cuda:
loss = loss.cuda()
return loss
def compute_mrcnn_parameter_loss(target_parameters, target_class_ids, pred_parameters):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
if (target_class_ids > 0).sum() > 0:
## Only positive ROIs contribute to the loss. And only
## the right class_id of each ROI. Get their indices.
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long()
indices = torch.stack((positive_roi_ix,positive_roi_class_ids), dim=1)
## Gather the deltas (predicted and true) that contribute to loss
target_parameters = target_parameters[indices[:,0].data,:]
pred_parameters = pred_parameters[indices[:,0].data,indices[:,1].data,:]
## Smooth L1 loss
loss = F.smooth_l1_loss(pred_parameters, target_parameters)
else:
loss = Variable(torch.FloatTensor([0]), requires_grad=False)
if target_class_ids.is_cuda:
loss = loss.cuda()
return loss
def compute_losses(config, rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask, target_parameters, mrcnn_parameters):
rpn_class_loss = compute_rpn_class_loss(rpn_match, rpn_class_logits)
rpn_bbox_loss = compute_rpn_bbox_loss(rpn_bbox, rpn_match, rpn_pred_bbox)
mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)
mrcnn_bbox_loss = compute_mrcnn_bbox_loss(target_deltas, target_class_ids, mrcnn_bbox)
mrcnn_mask_loss = compute_mrcnn_mask_loss(config, target_mask, target_class_ids, target_parameters, mrcnn_mask)
mrcnn_parameter_loss = compute_mrcnn_parameter_loss(target_parameters, target_class_ids, mrcnn_parameters)
return [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss, mrcnn_parameter_loss]
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN(nn.Module):
"""Encapsulates the Mask RCNN model functionality.
"""
def __init__(self, config, resnet_layers, model_dir='test'):
"""
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
super(MaskRCNN, self).__init__()
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.build(config=config)
self.initialize_weights()
self.loss_history = []
self.val_loss_history = []
self.resnet_layers = resnet_layers
def build(self, config):
"""Build Mask R-CNN architecture.
"""
## Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
## Build the shared convolutional layers.
## Bottom-up Layers
## Returns a list of the last layers of each stage, 5 in total.
## Don't create the head (stage 5), so we pick the 4th item in the list.
resnet = ResNet("resnet101", stage5=True, numInputChannels=config.NUM_INPUT_CHANNELS)
C1, C2, C3, C4, C5 = resnet.stages()
C2, C3, C4, C5 = self.resnet_layers # Overwriting resnet layers
# print("C1",C1)
# print("C2", C2)
# print("C3", C3)
# print("C4", C4)
# print("C5", C5)
## Top-down Layers
## TODO: add assert to verify feature map sizes match what's in config
self.fpn = FPN(C1, C2, C3, C4, C5, out_channels=256, bilinear_upsampling=self.config.BILINEAR_UPSAMPLING)
## Generate Anchors
self.anchors = Variable(torch.from_numpy(utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)).float(), requires_grad=False)
if self.config.GPU_COUNT:
self.anchors = self.anchors.cuda()
## RPN
self.rpn = RPN(len(config.RPN_ANCHOR_RATIOS), config.RPN_ANCHOR_STRIDE, 256)
## Coordinate feature
self.coordinates = nn.Conv2d(3, 64, kernel_size=1, stride=1)
## FPN Classifier
self.debug = False
self.classifier = Classifier(256, config.POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES, config.NUM_PARAMETERS, debug=self.debug)
## FPN Mask
self.mask = Mask(config, 256, config.MASK_POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES)
if self.config.PREDICT_DEPTH:
if self.config.PREDICT_BOUNDARY:
self.depth = Depth(num_output_channels=3)
else:
self.depth = Depth(num_output_channels=1)
pass
pass
## Fix batch norm layers
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters(): p.requires_grad = False
self.apply(set_bn_fix)
def initialize_weights(self):
"""Initialize model weights.
"""
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def set_trainable(self, layer_regex, model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
for param in self.named_parameters():
layer_name = param[0]
trainable = bool(re.fullmatch(layer_regex, layer_name))
if not trainable:
param[1].requires_grad = False
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
## Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
## If we have a model path with date and epochs use them
if model_path:
## Continue from where we left off. Get epoch and date from the file name
## A sample model path might look like:
## /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.pth"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6))
## Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
## Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.pth".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{:04d}")
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
## Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
## Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
## Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exlude: list of layer names to excluce
"""
if os.path.exists(filepath):
state_dict = torch.load(filepath)
try:
self.load_state_dict(state_dict, strict=False)
except:
print('load only base model')
try:
state_dict = {k: v for k, v in state_dict.items() if 'classifier.linear_class' not in k and 'classifier.linear_bbox' not in k and 'mask.conv5' not in k}
state = self.state_dict()
state.update(state_dict)
self.load_state_dict(state)
except:
print('change input dimension')
state_dict = {k: v for k, v in state_dict.items() if 'classifier.linear_class' not in k and 'classifier.linear_bbox' not in k and 'mask.conv5' not in k and 'fpn.C1.0' not in k and 'classifier.conv1' not in k}
state = self.state_dict()
state.update(state_dict)
self.load_state_dict(state)
pass
pass
else:
print("Weight file not found ...")
exit(1)
## Update the log directory
self.set_log_dir(filepath)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
def detect(self, images, camera, mold_image=True, image_metas=None):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
## Mold inputs to format expected by the neural network
if mold_image:
molded_images, image_metas, windows = mold_inputs(self.config, images)
else:
molded_images = images
windows = [(0, 0, images.shape[1], images.shape[2]) for _ in range(len(images))]
pass
## Convert images to torch tensor
molded_images = torch.from_numpy(molded_images.transpose(0, 3, 1, 2)).float()
## To GPU
if self.config.GPU_COUNT:
molded_images = molded_images.cuda()
## Wrap in variable
#molded_images = Variable(molded_images, volatile=True)
## Run object detection
detections, mrcnn_mask, depth_np = self.predict([molded_images, image_metas, camera], mode='inference')
if len(detections[0]) == 0:
return [{'rois': [], 'class_ids': [], 'scores': [], 'masks': [], 'parameters': []}]
## Convert to numpy
detections = detections.data.cpu().numpy()
mrcnn_mask = mrcnn_mask.permute(0, 1, 3, 4, 2).data.cpu().numpy()
## Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks, final_parameters =\
unmold_detections(self.config, detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
"parameters": final_parameters,
})
return results
def predict(self, input, mode, use_nms=1, use_refinement=False, return_feature_map=False):
molded_images = input[0]
image_metas = input[1]
if mode == 'inference':
self.eval()
elif 'training' in mode:
self.train()
## Set batchnorm always in eval mode during training
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.apply(set_bn_eval)
## Feature extraction
#print(molded_images)
[p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)
## Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]
mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]
# print('mrcn_feature_maps ',len(mrcnn_feature_maps))
# print('mrcn_feature_maps size ', mrcnn_feature_maps[0].size())
feature_maps = [feature_map for index, feature_map in enumerate(rpn_feature_maps[::-1])]
if self.config.PREDICT_DEPTH:
depth_np = self.depth(feature_maps)
if self.config.PREDICT_BOUNDARY:
boundary = depth_np[:, 1:]
depth_np = depth_np[:, 0]
else:
depth_np = depth_np.squeeze(1)
# print('depth_size',depth_np.size())
pass
else:
depth_np = torch.ones((1, self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM)).cuda()
pass
ranges = self.config.getRanges(input[-1]).transpose(1, 2).transpose(0, 1)
zeros = torch.zeros(3, (self.config.IMAGE_MAX_DIM - self.config.IMAGE_MIN_DIM) // 2, self.config.IMAGE_MAX_DIM).cuda()
ranges = torch.cat([zeros, ranges, zeros], dim=1)
ranges = torch.nn.functional.interpolate(ranges.unsqueeze(0), size=(160, 160), mode='bilinear')
ranges = self.coordinates(ranges * 10)
## Loop through pyramid layers
layer_outputs = [] ## list of lists
for p in rpn_feature_maps:
layer_outputs.append(self.rpn(p))
## Concatenate layer outputs
## Convert from list of lists of level outputs to list of lists
## of outputs across levels.
## e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
rpn_class_logits, rpn_class, rpn_bbox = outputs
## Generate proposals
## Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
## and zero padded.
proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'training' in mode and use_refinement == False \
else self.config.POST_NMS_ROIS_INFERENCE
rpn_rois = proposal_layer([rpn_class, rpn_bbox],
proposal_count=proposal_count,
nms_threshold=self.config.RPN_NMS_THRESHOLD,
anchors=self.anchors,
config=self.config)
# print('len rois',len(rpn_rois))
# print(rpn_rois[0].size())
#print('mode', mode)
if mode == 'inference':
## Network Heads
## Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters = self.classifier(mrcnn_feature_maps, rpn_rois, ranges)
## Detections
## output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, mrcnn_parameters, image_metas)
if len(detections) == 0:
return [[]], [[]], depth_np
## Convert boxes to normalized coordinates
## TODO: let DetectionLayer return normalized coordinates to avoid
## unnecessary conversions
h, w = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
detection_boxes = detections[:, :4] / scale
## Add back batch dimension
detection_boxes = detection_boxes.unsqueeze(0)
## Create masks for detections
mrcnn_mask, roi_features = self.mask(mrcnn_feature_maps, detection_boxes)
## Add back batch dimension
detections = detections.unsqueeze(0)
mrcnn_mask = mrcnn_mask.unsqueeze(0)
return [detections, mrcnn_mask, depth_np]
elif mode == 'training':
gt_class_ids = input[2]
gt_boxes = input[3]
gt_masks = input[4]
gt_parameters = input[5]
## Normalize coordinates
h, w = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
## Generate detection targets
## Subsamples proposals and generates target outputs for training
## Note that proposal class IDs, gt_boxes, and gt_masks are zero
## padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_deltas, target_mask, target_parameters = \
detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_parameters, self.config)
if len(rois) == 0:
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
mrcnn_parameters = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
mrcnn_parameters = mrcnn_parameters.cuda()
else:
## Network Heads
## Proposal classifier and BBox regressor heads
#print([maps.shape for maps in mrcnn_feature_maps], target_parameters.shape)
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters = self.classifier(mrcnn_feature_maps, rois, ranges, target_parameters)
## Create masks for detections
mrcnn_mask, _ = self.mask(mrcnn_feature_maps, rois)
return [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask, target_parameters, mrcnn_parameters, rois, depth_np]
elif mode in ['training_detection', 'inference_detection']:
gt_class_ids = input[2]
gt_boxes = input[3]
gt_masks = input[4]
gt_parameters = input[5]
## Normalize coordinates
h, w = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
## Generate detection targets
## Subsamples proposals and generates target outputs for training
## Note that proposal class IDs, gt_boxes, and gt_masks are zero
## padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_deltas, target_mask, target_parameters = \
detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_parameters, self.config)
if len(rois) == 0:
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
mrcnn_parameters = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
mrcnn_parameters = mrcnn_parameters.cuda()
else:
## Network Heads
## Proposal classifier and BBox regressor heads
# print('rois1', len(rois), rois[0].size())
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters, roi_features = self.classifier(mrcnn_feature_maps, rois, ranges, pool_features=True)
## Create masks for detections
mrcnn_mask, _ = self.mask(mrcnn_feature_maps, rois)
pass
h, w = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
if use_refinement:
# print('rois2', len(rpn_rois), rpn_rois[0].size())
mrcnn_class_logits_final, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, roi_features = self.classifier(mrcnn_feature_maps, rpn_rois[0], ranges, pool_features=True)
## Add back batch dimension
## Create masks for detections
detections, indices, _ = detection_layer(self.config, rpn_rois, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, image_metas, return_indices=True, use_nms=use_nms)
if len(detections) > 0:
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
# print('fm', len(mrcnn_feature_maps), mrcnn_feature_maps[0].size())
# print('len_det_box', len(detection_boxes),detection_boxes[0].size())
detection_masks, _ = self.mask(mrcnn_feature_maps, detection_boxes)
# print('det_masks', len(detection_masks), detection_masks[16].size())
roi_features = roi_features[indices]
pass
else:
mrcnn_class_logits_final, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final = mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters
rpn_rois = rois
detections, indices, _ = detection_layer(self.config, rpn_rois, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, image_metas, return_indices=True, use_nms=use_nms)
if len(detections) > 0:
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
detection_masks, _ = self.mask(mrcnn_feature_maps, detection_boxes)
roi_features = roi_features[indices]
pass
pass
valid = False
if len(detections) > 0:
positive_rois = detection_boxes.squeeze(0)
gt_class_ids = gt_class_ids.squeeze(0)
gt_boxes = gt_boxes.squeeze(0)
gt_masks = gt_masks.squeeze(0)
gt_parameters = gt_parameters.squeeze(0)
## Compute overlaps matrix [proposals, gt_boxes]
overlaps = bbox_overlaps(positive_rois, gt_boxes)
## Determine positive and negative ROIs
roi_iou_max = torch.max(overlaps, dim=1)[0]
## 1. Positive ROIs are those with >= 0.5 IoU with a GT box
if 'inference' in mode:
positive_roi_bool = roi_iou_max > -1
else:
positive_roi_bool = roi_iou_max > 0.2
pass
detections = detections[positive_roi_bool]
# print('type', type(positive_roi_bool))
# print(len(positive_roi_bool))
# print(positive_roi_bool)
detection_masks = detection_masks[positive_roi_bool]
roi_features = roi_features[positive_roi_bool]
if len(detections) > 0:
positive_indices = torch.nonzero(positive_roi_bool)[:, 0]
positive_rois = positive_rois[positive_indices.data]
## Assign positive ROIs to GT boxes.
positive_overlaps = overlaps[positive_indices.data,:]
roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
roi_gt_boxes = gt_boxes[roi_gt_box_assignment.data,:]
roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment.data]
roi_gt_parameters = gt_parameters[roi_gt_box_assignment.data]
roi_gt_parameters = self.config.applyAnchorsTensor(roi_gt_class_ids.long(), roi_gt_parameters)
## Assign positive ROIs to GT masks
roi_gt_masks = gt_masks[roi_gt_box_assignment.data,:,:]
valid_mask = positive_overlaps.max(0)[1]
valid_mask = (valid_mask[roi_gt_box_assignment] == torch.arange(len(roi_gt_box_assignment)).long().cuda()).long()
roi_indices = roi_gt_box_assignment * valid_mask + (-1) * (1 - valid_mask)
## Compute mask targets
boxes = positive_rois
if self.config.USE_MINI_MASK:
## Transform ROI coordinates from normalized image space
## to normalized mini-mask space.
y1, x1, y2, x2 = positive_rois.chunk(4, dim=1)
gt_y1, gt_x1, gt_y2, gt_x2 = roi_gt_boxes.chunk(4, dim=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = torch.cat([y1, x1, y2, x2], dim=1)
pass
box_ids = Variable(torch.arange(roi_gt_masks.size()[0]), requires_grad=False).int()
if self.config.GPU_COUNT:
box_ids = box_ids.cuda()
roi_gt_masks = Variable(CropAndResizeFunction(self.config.FINAL_MASK_SHAPE[0], self.config.FINAL_MASK_SHAPE[1], 0)(roi_gt_masks.unsqueeze(1), boxes, box_ids).data, requires_grad=False)
roi_gt_masks = roi_gt_masks.squeeze(1)
roi_gt_masks = torch.round(roi_gt_masks)
valid = True
pass
pass
if not valid:
detections = torch.FloatTensor()
detection_masks = torch.FloatTensor()
roi_gt_parameters = torch.FloatTensor()
roi_gt_masks = torch.FloatTensor()
roi_features = torch.FloatTensor()
roi_indices = torch.LongTensor()
if self.config.GPU_COUNT:
detections = detections.cuda()
detection_masks = detection_masks.cuda()
roi_gt_parameters = roi_gt_parameters.cuda()
roi_gt_masks = roi_gt_masks.cuda()
roi_features = roi_features.cuda()
roi_indices = roi_indices.cuda()
pass
pass
info = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask,
mrcnn_mask, target_parameters, mrcnn_parameters, detections, detection_masks, roi_gt_parameters, roi_gt_masks,
rpn_rois, roi_features, roi_indices]
if return_feature_map:
feature_map = mrcnn_feature_maps
info.append(feature_map)
pass
# print('depth_np_source',len(depth_np),depth_np.size())
info.append(depth_np)
if self.config.PREDICT_BOUNDARY:
info.append(boundary)
pass
return info
############################################################
# Data Formatting
############################################################
def mold_inputs(config, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
## Resize image to fit the model expected size
## TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
molded_image = mold_image(molded_image, config)
## Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([config.NUM_CLASSES], dtype=np.int32))
## Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
## Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
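## Typical call (as in detect() above):
## molded_images, image_metas, windows = mold_inputs(config, [image])
## molded_images is a float array of shape [N, h, w, 3], ready to be transposed to NCHW.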
def unmold_detections(config, detections, mrcnn_mask, image_shape, window, debug=False):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
## How many detections do we have?
## Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
## Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
parameters = detections[:N, 6:]
if config.GLOBAL_MASK:
masks = mrcnn_mask[np.arange(N), :, :, 0]
else:
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
pass
## Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] ## y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
## Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
if debug:
print(masks.shape, boxes.shape)
for maskIndex, mask in enumerate(masks):
print(maskIndex, boxes[maskIndex].astype(np.int32))
cv2.imwrite('test/local_mask_' + str(maskIndex) + '.png', (mask * 255).astype(np.uint8))
continue
## Filter out detections with zero area. Often only happens in early
## stages of training when the network weights are still a bit random.
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
parameters = np.delete(parameters, exclude_ix, axis=0)
N = class_ids.shape[0]
## Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
## Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
if debug:
print(full_masks.shape)
for maskIndex in range(full_masks.shape[2]):
cv2.imwrite('test/full_mask_' + str(maskIndex) + '.png', (full_masks[:, :, maskIndex] * 255).astype(np.uint8))
continue
pass
return boxes, class_ids, scores, full_masks, parameters
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array. Use
parse_image_meta() to parse the values back.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + ## size=1
list(image_shape) + ## size=3
list(window) + ## size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) ## size=num_classes
)
return meta
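## Illustrative layout: for image_id 0, a 640x640x3 image, window (0, 80, 640, 560) and
## NUM_CLASSES = 2, meta is the 1-D array
## [0, 640, 640, 3, 0, 80, 640, 560, 0, 0] (1 + 3 + 4 + num_classes entries).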
## Two functions (for Numpy and TF) to parse image_meta tensors.
def parse_image_meta(meta):
"""Parses an image info Numpy array to its components.
See compose_image_meta() for more details.
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8] ## (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return image_id, image_shape, window, active_class_ids
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8]
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
| [
"torch.nn.Linear",
"torch.round",
"torch.cat",
"torch.nn.modules.utils._pair",
"torch.stack",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.nn.functional.cross_entropy",
"torch.LongTensor",
"torch.load",
"torch.nn.functional.pad",
"torch.exp",
"torch.sqrt",
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.IntTensor",
"torch.norm",
"torch.FloatTensor",
"torch.nn.ConvTranspose2d",
"torch.ByteTensor",
"torch.zeros",
"torch.nonzero",
"torch.min",
"torch.max",
"torch.nn.Sequential",
"torch.nn.functional.upsample",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.log",
"torch.sort",
"torch.nn.functional.binary_cross_entropy",
"torch.nn.Sigmoid",
"torch.nn.init.xavier_uniform",
"torch.nn.functional.interpolate",
"torch.from_numpy",
"torch.nn.Upsample"
] | 1.6.0 | eva5covergence/EVA5_AI_Projects | 7052373c52b6b9901cd0bc05a4758dd4b63f7480 |
1.9 | from typing import Optional
import torch
from torch.nn import functional as F
def aa_to_rotmat(theta: torch.Tensor):
"""
Convert axis-angle representation to rotation matrix.
Works by first converting it to a quaternion.
Args:
theta (torch.Tensor): Tensor of shape (B, 3) containing axis-angle representations.
Returns:
torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).
"""
norm = torch.norm(theta + 1e-8, p = 2, dim = 1)
angle = torch.unsqueeze(norm, -1)
normalized = torch.div(theta, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * normalized], dim = 1)
return quat_to_rotmat(quat)
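# Illustrative sanity-check sketch: a zero axis-angle vector should map to
# (approximately) the identity rotation via the quaternion route above.
def _aa_to_rotmat_identity_example() -> torch.Tensor:
    theta = torch.zeros(1, 3)
    return aa_to_rotmat(theta)  # shape (1, 3, 3), close to torch.eye(3)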
def quat_to_rotmat(quat: torch.Tensor) -> torch.Tensor:
"""
Convert quaternion representation to rotation matrix.
Args:
quat (torch.Tensor) of shape (B, 4); 4 <===> (w, x, y, z).
Returns:
torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).
"""
norm_quat = quat
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def rot6d_to_rotmat(x: torch.Tensor) -> torch.Tensor:
"""
Convert 6D rotation representation to 3x3 rotation matrix.
Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
Args:
x (torch.Tensor): (B,6) Batch of 6-D rotation representations.
Returns:
torch.Tensor: Batch of corresponding rotation matrices with shape (B,3,3).
"""
x = x.reshape(-1,2,3).permute(0, 2, 1).contiguous()
a1 = x[:, :, 0]
a2 = x[:, :, 1]
b1 = F.normalize(a1)
b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
b3 = torch.cross(b1, b2)
return torch.stack((b1, b2, b3), dim=-1)
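# Illustrative sanity-check sketch: the 6D vector (1, 0, 0, 0, 1, 0) holds the
# first two columns of the identity, so the Gram-Schmidt step above should
# recover torch.eye(3).
def _rot6d_identity_example() -> torch.Tensor:
    x = torch.tensor([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]])
    return rot6d_to_rotmat(x)  # shape (1, 3, 3), approximately the identity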
def perspective_projection(points: torch.Tensor,
translation: torch.Tensor,
focal_length: torch.Tensor,
camera_center: Optional[torch.Tensor] = None,
rotation: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Computes the perspective projection of a set of 3D points.
Args:
points (torch.Tensor): Tensor of shape (B, N, 3) containing the input 3D points.
translation (torch.Tensor): Tensor of shape (B, 3) containing the 3D camera translation.
focal_length (torch.Tensor): Tensor of shape (B, 2) containing the focal length in pixels.
camera_center (torch.Tensor): Tensor of shape (B, 2) containing the camera center in pixels.
rotation (torch.Tensor): Tensor of shape (B, 3, 3) containing the camera rotation.
Returns:
torch.Tensor: Tensor of shape (B, N, 2) containing the projection of the input points.
"""
batch_size = points.shape[0]
if rotation is None:
rotation = torch.eye(3, device=points.device, dtype=points.dtype).unsqueeze(0).expand(batch_size, -1, -1)
if camera_center is None:
camera_center = torch.zeros(batch_size, 2, device=points.device, dtype=points.dtype)
# Populate intrinsic camera matrix K.
K = torch.zeros([batch_size, 3, 3], device=points.device, dtype=points.dtype)
K[:,0,0] = focal_length[:,0]
K[:,1,1] = focal_length[:,1]
K[:,2,2] = 1.
K[:,:-1, -1] = camera_center
# Transform points
points = torch.einsum('bij,bkj->bki', rotation, points)
points = points + translation.unsqueeze(1)
# Apply perspective distortion
projected_points = points / points[:,:,-1].unsqueeze(-1)
# Apply camera intrinsics
projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
return projected_points[:, :, :-1] | [
"torch.zeros",
"torch.cos",
"torch.cat",
"torch.nn.functional.normalize",
"torch.stack",
"torch.sin",
"torch.einsum",
"torch.norm",
"torch.unsqueeze",
"torch.eye",
"torch.cross",
"torch.div"
] | 1.9.0 | michael-p-sachen/ProHMR | 0167d05a9a45939a217d02b4ef8fd67977c15f82 |
1.0 | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
import importlib
import logging
import os
import re
import sys
import time
import traceback
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import scipy.sparse
import sklearn.pipeline
from sklearn.metrics import accuracy_score, check_scoring, log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.metaestimators import _safe_split
import lale.datasets.data_schemas
try:
import torch
torch_installed = True
except ImportError:
torch_installed = False
from importlib import util
spark_loader = util.find_spec("pyspark")
spark_installed = spark_loader is not None
if spark_installed:
from pyspark.sql.dataframe import DataFrame as spark_df
logger = logging.getLogger(__name__)
LALE_NESTED_SPACE_KEY = "__lale_nested_space"
def make_nested_hyperopt_space(sub_space):
return {LALE_NESTED_SPACE_KEY: sub_space}
def assignee_name(level=1) -> Optional[str]:
tb = traceback.extract_stack()
file_name, line_number, function_name, text = tb[-(level + 2)]
try:
tree = ast.parse(text, file_name)
except SyntaxError:
return None
assert tree is not None and isinstance(tree, ast.Module)
if len(tree.body) == 1:
stmt = tree.body[0]
if isinstance(stmt, ast.Assign):
lhs = stmt.targets
if len(lhs) == 1:
res = lhs[0]
if isinstance(res, ast.Name):
return res.id
return None
def arg_name(pos=0, level=1) -> Optional[str]:
tb = traceback.extract_stack()
file_name, line_number, function_name, text = tb[-(level + 2)]
try:
tree = ast.parse(text, file_name)
except SyntaxError:
return None
assert tree is not None and isinstance(tree, ast.Module)
if len(tree.body) == 1:
stmt = tree.body[0]
if isinstance(stmt, ast.Expr):
expr = stmt.value
if isinstance(expr, ast.Call):
args = expr.args
if pos < len(args):
res = args[pos]
if isinstance(res, ast.Name):
return res.id
return None
def data_to_json(data, subsample_array: bool = True) -> Union[list, dict, int, float]:
if type(data) is tuple:
# convert to list
return [data_to_json(elem, subsample_array) for elem in data]
if type(data) is list:
return [data_to_json(elem, subsample_array) for elem in data]
elif type(data) is dict:
return {key: data_to_json(data[key], subsample_array) for key in data}
elif isinstance(data, np.ndarray):
return ndarray_to_json(data, subsample_array)
elif type(data) is scipy.sparse.csr_matrix:
return ndarray_to_json(data.toarray(), subsample_array)
elif isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
np_array = data.values
return ndarray_to_json(np_array, subsample_array)
elif torch_installed and isinstance(data, torch.Tensor):
np_array = data.detach().numpy()
return ndarray_to_json(np_array, subsample_array)
elif isinstance(data, (np.int64, np.int32, np.int16)): # type: ignore
return int(data)
elif isinstance(data, (np.float32, np.float64)): # type: ignore
return float(data)
else:
return data
def is_empty_dict(val) -> bool:
return isinstance(val, dict) and len(val) == 0
def dict_without(orig_dict: Dict[str, Any], key: str) -> Dict[str, Any]:
return {k: orig_dict[k] for k in orig_dict if k != key}
def json_lookup(ptr, jsn, default=None):
steps = ptr.split("/")
sub_jsn = jsn
for s in steps:
if s not in sub_jsn:
return default
sub_jsn = sub_jsn[s]
return sub_jsn
def ndarray_to_json(arr: np.ndarray, subsample_array: bool = True) -> Union[list, dict]:
# sample 10 rows and no limit on columns
num_subsamples: List[int]
if subsample_array:
num_subsamples = [10, np.iinfo(int).max, np.iinfo(int).max]
else:
num_subsamples = [
np.iinfo(int).max,
np.iinfo(int).max,
np.iinfo(int).max,
]
def subarray_to_json(indices: Tuple[int, ...]) -> Any:
if len(indices) == len(arr.shape):
if (
isinstance(arr[indices], bool)
or isinstance(arr[indices], int)
or isinstance(arr[indices], float)
or isinstance(arr[indices], str)
):
return arr[indices]
elif np.issubdtype(arr.dtype, np.bool_):
return bool(arr[indices])
elif np.issubdtype(arr.dtype, np.integer):
return int(arr[indices])
elif np.issubdtype(arr.dtype, np.number):
return float(arr[indices])
elif arr.dtype.kind in ["U", "S", "O"]:
return str(arr[indices])
else:
raise ValueError(
f"Unexpected dtype {arr.dtype}, "
f"kind {arr.dtype.kind}, "
f"type {type(arr[indices])}."
)
else:
assert len(indices) < len(arr.shape)
return [
subarray_to_json(indices + (i,))
for i in range(
min(num_subsamples[len(indices)], arr.shape[len(indices)])
)
]
return subarray_to_json(())
def split_with_schemas(estimator, all_X, all_y, indices, train_indices=None):
subset_X, subset_y = _safe_split(estimator, all_X, all_y, indices, train_indices)
if hasattr(all_X, "json_schema"):
n_rows = subset_X.shape[0]
schema = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": all_X.json_schema["items"],
}
lale.datasets.data_schemas.add_schema(subset_X, schema)
if hasattr(all_y, "json_schema"):
n_rows = subset_y.shape[0]
schema = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": all_y.json_schema["items"],
}
lale.datasets.data_schemas.add_schema(subset_y, schema)
return subset_X, subset_y
def fold_schema(X, y, cv=1, is_classifier=True):
def fold_schema_aux(data, n_rows):
orig_schema = lale.datasets.data_schemas.to_schema(data)
aux_result = {**orig_schema, "minItems": n_rows, "maxItems": n_rows}
return aux_result
n_splits = cv if isinstance(cv, int) else cv.get_n_splits()
try:
n_samples = X.shape[0] if hasattr(X, "shape") else len(X)
except TypeError: # raised for Spark dataframes.
n_samples = X.count() if hasattr(X, "count") else 0
if n_splits == 1:
n_rows_fold = n_samples
elif is_classifier:
n_classes = len(set(y))
n_rows_unstratified = (n_samples // n_splits) * (n_splits - 1)
# in stratified case, fold sizes can differ by up to n_classes
n_rows_fold = max(1, n_rows_unstratified - n_classes)
else:
n_rows_fold = (n_samples // n_splits) * (n_splits - 1)
schema_X = fold_schema_aux(X, n_rows_fold)
schema_y = fold_schema_aux(y, n_rows_fold)
result = {"properties": {"X": schema_X, "y": schema_y}}
return result
def cross_val_score_track_trials(
estimator,
X,
y=None,
scoring=accuracy_score,
cv=5,
args_to_scorer=None,
args_to_cv=None,
**fit_params,
):
"""
Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
each of the splits.
Parameters
----------
estimator: A valid sklearn_wrapper estimator
X, y: Valid data and target values that work with the estimator
scoring: string or a scorer object created using
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
A string from sklearn.metrics.SCORERS.keys() can be used or a scorer created from one of
sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics).
A completely custom scorer object can be created from a python function following the example at
https://scikit-learn.org/stable/modules/model_evaluation.html
The metric has to return a scalar value,
cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.
args_to_scorer: A dictionary of additional keyword arguments to pass to the scorer.
Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.
args_to_cv: A dictionary of additional keyword arguments to pass to the split method of cv.
This is only applicable when cv is not an integer.
Returns
-------
    result: a tuple of (mean score, mean log loss, mean execution time) across the cross validation folds
"""
if isinstance(cv, int):
cv = StratifiedKFold(cv)
if args_to_scorer is None:
args_to_scorer = {}
if args_to_cv is None:
args_to_cv = {}
scorer = check_scoring(estimator, scoring=scoring)
cv_results: List[float] = []
log_loss_results = []
time_results = []
for train, test in cv.split(X, y, **args_to_cv):
X_train, y_train = split_with_schemas(estimator, X, y, train)
X_test, y_test = split_with_schemas(estimator, X, y, test, train)
start = time.time()
# Not calling sklearn.base.clone() here, because:
# (1) For Lale pipelines, clone() calls the pipeline constructor
# with edges=None, so the resulting topology is incorrect.
# (2) For Lale individual operators, the fit() method already
# clones the impl object, so cloning again is redundant.
trained = estimator.fit(X_train, y_train, **fit_params)
score_value = scorer(trained, X_test, y_test, **args_to_scorer)
execution_time = time.time() - start
# not all estimators have predict probability
try:
y_pred_proba = trained.predict_proba(X_test)
logloss = log_loss(y_true=y_test, y_pred=y_pred_proba)
log_loss_results.append(logloss)
except BaseException:
logger.debug("Warning, log loss cannot be computed")
cv_results.append(score_value)
time_results.append(execution_time)
result = (
np.array(cv_results).mean(),
np.array(log_loss_results).mean(),
np.array(time_results).mean(),
)
return result
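# Hedged usage sketch (hypothetical estimator and data): the helper above fits
# the estimator once per fold and reports fold-averaged score, log loss, and
# wall-clock time.
def _cross_val_score_track_trials_example(estimator, X, y):
    mean_score, mean_log_loss, mean_time = cross_val_score_track_trials(
        estimator, X, y, scoring="accuracy", cv=3
    )
    return mean_score, mean_log_loss, mean_time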
def cross_val_score(estimator, X, y=None, scoring=accuracy_score, cv=5):
"""
Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
each of the splits.
Parameters
----------
estimator: A valid sklearn_wrapper estimator
X, y: Valid data and target values that work with the estimator
scoring: a scorer object from sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics)
Default value is accuracy_score.
cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.
Returns
-------
cv_results: a list of scores corresponding to each cross validation fold
"""
if isinstance(cv, int):
cv = StratifiedKFold(cv)
cv_results = []
for train, test in cv.split(X, y):
X_train, y_train = split_with_schemas(estimator, X, y, train)
X_test, y_test = split_with_schemas(estimator, X, y, test, train)
trained_estimator = estimator.fit(X_train, y_train)
predicted_values = trained_estimator.predict(X_test)
cv_results.append(scoring(y_test, predicted_values))
return cv_results
def create_individual_op_using_reflection(class_name, operator_name, param_dict):
instance = None
if class_name is not None:
class_name_parts = class_name.split(".")
assert (
len(class_name_parts)
) > 1, (
"The class name needs to be fully qualified, i.e. module name + class name"
)
module_name = ".".join(class_name_parts[0:-1])
class_name = class_name_parts[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
if param_dict is None:
instance = class_()
else:
instance = class_(**param_dict)
return instance
if TYPE_CHECKING:
import lale.operators
def to_graphviz(
lale_operator: "lale.operators.Operator",
ipython_display: bool = True,
call_depth: int = 1,
**dot_graph_attr,
):
import lale.json_operator
import lale.operators
import lale.visualize
if not isinstance(lale_operator, lale.operators.Operator):
raise TypeError("The input to to_graphviz needs to be a valid LALE operator.")
jsn = lale.json_operator.to_json(lale_operator, call_depth=call_depth + 1)
dot = lale.visualize.json_to_graphviz(jsn, ipython_display, dot_graph_attr)
return dot
def println_pos(message, out_file=sys.stdout):
tb = traceback.extract_stack()[-2]
match = re.search(r"<ipython-input-([0-9]+)-", tb[0])
if match:
pos = "notebook cell [{}] line {}".format(match[1], tb[1])
else:
pos = "{}:{}".format(tb[0], tb[1])
strtime = time.strftime("%Y-%m-%d_%H-%M-%S")
to_log = "{}: {} {}".format(pos, strtime, message)
print(to_log, file=out_file)
if match:
os.system("echo {}".format(to_log))
def instantiate_from_hyperopt_search_space(obj_hyperparams, new_hyperparams):
if isinstance(new_hyperparams, dict) and LALE_NESTED_SPACE_KEY in new_hyperparams:
sub_params = new_hyperparams[LALE_NESTED_SPACE_KEY]
sub_op = obj_hyperparams
if isinstance(sub_op, list):
if len(sub_op) == 1:
sub_op = sub_op[0]
else:
step_index, step_params = list(sub_params)[0]
if step_index < len(sub_op):
sub_op = sub_op[step_index]
sub_params = step_params
return create_instance_from_hyperopt_search_space(sub_op, sub_params)
elif isinstance(new_hyperparams, (list, tuple)):
assert isinstance(obj_hyperparams, (list, tuple))
params_len = len(new_hyperparams)
assert params_len == len(obj_hyperparams)
res: Optional[List[Any]] = None
for i in range(params_len):
nhi = new_hyperparams[i]
ohi = obj_hyperparams[i]
updated_params = instantiate_from_hyperopt_search_space(ohi, nhi)
if updated_params is not None:
if res is None:
res = list(new_hyperparams)
res[i] = updated_params
if res is not None:
if isinstance(obj_hyperparams, tuple):
return tuple(res)
else:
return res
# workaround for what seems to be a hyperopt bug
# where hyperopt returns a tuple even though the
# hyperopt search space specifies a list
is_obj_tuple = isinstance(obj_hyperparams, tuple)
is_new_tuple = isinstance(new_hyperparams, tuple)
if is_obj_tuple != is_new_tuple:
if is_obj_tuple:
return tuple(new_hyperparams)
else:
return list(new_hyperparams)
return None
elif isinstance(new_hyperparams, dict):
assert isinstance(obj_hyperparams, dict)
for k, sub_params in new_hyperparams.items():
if k in obj_hyperparams:
sub_op = obj_hyperparams[k]
updated_params = instantiate_from_hyperopt_search_space(
sub_op, sub_params
)
if updated_params is not None:
new_hyperparams[k] = updated_params
return None
else:
return None
def create_instance_from_hyperopt_search_space(
lale_object, hyperparams
) -> "lale.operators.Operator":
"""
Hyperparams is a n-tuple of dictionaries of hyper-parameters, each
dictionary corresponds to an operator in the pipeline
"""
# lale_object can either be an individual operator, a pipeline or an operatorchoice
# Validate that the number of elements in the n-tuple is the same
# as the number of steps in the current pipeline
from lale.operators import (
BasePipeline,
OperatorChoice,
PlannedIndividualOp,
TrainableOperator,
TrainablePipeline,
)
if isinstance(lale_object, PlannedIndividualOp):
new_hyperparams: Dict[str, Any] = dict_without(hyperparams, "name")
hps = lale_object.hyperparams()
if hps:
obj_hyperparams = dict(hps)
else:
obj_hyperparams = {}
for k, sub_params in new_hyperparams.items():
if k in obj_hyperparams:
sub_op = obj_hyperparams[k]
updated_params = instantiate_from_hyperopt_search_space(
sub_op, sub_params
)
if updated_params is not None:
new_hyperparams[k] = updated_params
all_hyperparams = {**obj_hyperparams, **new_hyperparams}
return lale_object(**all_hyperparams)
elif isinstance(lale_object, BasePipeline):
steps = lale_object.steps_list()
if len(hyperparams) != len(steps):
raise ValueError(
"The number of steps in the hyper-parameter space does not match the number of steps in the pipeline."
)
op_instances = []
edges = lale_object.edges()
# op_map:Dict[PlannedOpType, TrainableOperator] = {}
op_map = {}
for op_index, sub_params in enumerate(hyperparams):
sub_op = steps[op_index]
op_instance = create_instance_from_hyperopt_search_space(sub_op, sub_params)
assert isinstance(op_instance, TrainableOperator)
assert (
isinstance(sub_op, OperatorChoice)
or sub_op.class_name() == op_instance.class_name()
), f"sub_op {sub_op.class_name()}, op_instance {op_instance.class_name()}"
op_instances.append(op_instance)
op_map[sub_op] = op_instance
# trainable_edges:List[Tuple[TrainableOperator, TrainableOperator]]
try:
trainable_edges = [(op_map[x], op_map[y]) for (x, y) in edges]
except KeyError as e:
raise ValueError(
"An edge was found with an endpoint that is not a step (" + str(e) + ")"
)
return TrainablePipeline(op_instances, trainable_edges, ordered=True) # type: ignore
elif isinstance(lale_object, OperatorChoice):
# Hyperopt search space for an OperatorChoice is generated as a dictionary with a single element
# corresponding to the choice made, the only key is the index of the step and the value is
# the params corresponding to that step.
step_index: int
choices = lale_object.steps_list()
if len(choices) == 1:
step_index = 0
else:
step_index_str, hyperparams = list(hyperparams.items())[0]
step_index = int(step_index_str)
step_object = choices[step_index]
return create_instance_from_hyperopt_search_space(step_object, hyperparams)
else:
assert False, f"Unknown operator type: {type(lale_object)}"
def import_from_sklearn_pipeline(sklearn_pipeline, fitted=True, is_hyperparam=False):
# For all pipeline steps, identify equivalent lale wrappers if present,
# if not, call make operator on sklearn classes and create a lale pipeline.
# For higher order operators, we allow hyperparameters to be trainable even with
# fitted is True. This is achieved using the is_hyperparam flag.
def find_lale_wrapper(sklearn_obj):
module_names = [
"lale.lib.sklearn",
"lale.lib.autoai_libs",
"lale.lib.xgboost",
"lale.lib.lightgbm",
"lale.lib.snapml",
]
try:
import autoai_ts_libs # type: ignore # noqa
module_names.append("lale.lib.autoai_ts_libs")
except ImportError:
pass
lale_wrapper_found = False
class_name = sklearn_obj.__class__.__name__
for module_name in module_names:
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError:
continue
try:
class_ = getattr(module, class_name)
lale_wrapper_found = True
break
except AttributeError:
continue
else:
return lale_wrapper_found, sklearn_obj
return lale_wrapper_found, class_
import lale.operators
import lale.type_checking
sklearn_obj = sklearn_pipeline
if isinstance(sklearn_obj, lale.operators.TrainableIndividualOp) and fitted:
if hasattr(sklearn_obj, "_trained"):
return sklearn_obj._trained
elif is_hyperparam or not hasattr(
sklearn_obj._impl_instance(), "fit"
): # Operators such as NoOp do not have a fit, so return them as is.
return sklearn_obj
else:
raise ValueError(
f"""The input pipeline has an operator {sklearn_obj} that is not trained and fitted is set to True,
please pass fitted=False if you want a trainable pipeline as output."""
)
elif isinstance(sklearn_obj, lale.operators.Operator):
return sklearn_obj
if isinstance(sklearn_pipeline, sklearn.pipeline.Pipeline):
nested_pipeline_steps = sklearn_pipeline.named_steps
nested_pipeline_lale_named_steps = [
(
nested_pipeline_step[0],
import_from_sklearn_pipeline(
nested_pipeline_step[1], fitted=fitted, is_hyperparam=is_hyperparam
),
)
for nested_pipeline_step in nested_pipeline_steps.items()
]
if type(sklearn_pipeline) == sklearn.pipeline.Pipeline:
nested_pipeline_lale_objects = [
nested_pipeline_lale_named_step[1]
for nested_pipeline_lale_named_step in nested_pipeline_lale_named_steps
]
lale_op_obj = lale.operators.make_pipeline(*nested_pipeline_lale_objects)
else:
lale_wrapper_found, wrapper_class = find_lale_wrapper(sklearn_pipeline)
if lale_wrapper_found:
# This is a custom subclass of sklearn pipeline, so use the wrapper class
# instead of creating a lale pipeline
# We assume it has a hyperparameter `steps`.
if (
not fitted
): # If fitted is False, we do not want to return a Trained operator.
lale_op = wrapper_class
else:
lale_op = lale.operators.TrainedIndividualOp(
wrapper_class._name,
wrapper_class._impl,
wrapper_class._schemas,
None,
_lale_trained=True,
)
lale_op_obj = lale_op(steps=nested_pipeline_lale_named_steps)
else: # no conversion to lale if a wrapper is not found for a subclass of pipeline
return sklearn_pipeline
elif isinstance(sklearn_pipeline, sklearn.pipeline.FeatureUnion):
transformer_list = sklearn_pipeline.transformer_list
concat_predecessors = [
import_from_sklearn_pipeline(
transformer[1], fitted=fitted, is_hyperparam=is_hyperparam
)
for transformer in transformer_list
]
lale_op_obj = lale.operators.make_union(*concat_predecessors)
else:
# Validate that the sklearn_obj is a valid sklearn-compatible object
if sklearn_obj is None or not hasattr(sklearn_obj, "get_params"):
raise ValueError(
f"The input pipeline has a step {sklearn_obj} that is not scikit-learn compatible."
)
orig_hyperparams = sklearn_obj.get_params(deep=False)
higher_order = False
for hp_name, hp_val in orig_hyperparams.items():
higher_order = higher_order or hasattr(hp_val, "get_params")
if higher_order:
hyperparams = {}
for hp_name, hp_val in orig_hyperparams.items():
if hasattr(hp_val, "get_params"):
nested_op = import_from_sklearn_pipeline(
hp_val, fitted, is_hyperparam=True
) # allow nested_op to be trainable
hyperparams[hp_name] = nested_op
else:
hyperparams[hp_name] = hp_val
else:
hyperparams = orig_hyperparams
lale_wrapper_found, class_ = find_lale_wrapper(sklearn_obj)
if not lale_wrapper_found:
return class_ # Return the original object
if (
not fitted
): # If fitted is False, we do not want to return a Trained operator.
lale_op = class_
else:
lale_op = lale.operators.TrainedIndividualOp(
class_._name, class_._impl, class_._schemas, None, _lale_trained=True
)
class_ = lale_op(**hyperparams)
lale_op_obj = class_
if lale_wrapper_found and hasattr(class_._impl_instance(), "_wrapped_model"):
wrapped_model = copy.deepcopy(sklearn_obj)
class_._impl_instance()._wrapped_model = wrapped_model
else: # If there is no lale wrapper, there is no _wrapped_model
class_._impl = copy.deepcopy(sklearn_obj)
class_._impl_class_ = class_._impl.__class__
lale_op_obj = class_
return lale_op_obj
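# Hedged usage sketch: converting a plain, unfitted scikit-learn pipeline into a
# trainable lale pipeline (fitted=False because no step has been trained yet).
def _import_from_sklearn_pipeline_example():
    from sklearn.decomposition import PCA
    from sklearn.linear_model import LogisticRegression
    skl_pipeline = sklearn.pipeline.make_pipeline(
        PCA(n_components=2), LogisticRegression()
    )
    return import_from_sklearn_pipeline(skl_pipeline, fitted=False)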
class val_wrapper:
"""This is used to wrap values that cause problems for hyper-optimizer backends
lale will unwrap these when given them as the value of a hyper-parameter"""
def __init__(self, base):
self._base = base
def unwrap_self(self):
return self._base
@classmethod
def unwrap(cls, obj):
if isinstance(obj, cls):
return cls.unwrap(obj.unwrap_self())
else:
return obj
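# Illustrative sketch: wrapping a value and unwrapping it again; unwrap() also
# handles the nested-wrapping case recursively.
def _val_wrapper_example():
    wrapped = val_wrapper(val_wrapper([1, 2, 3]))
    return val_wrapper.unwrap(wrapped)  # -> [1, 2, 3]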
def append_batch(data, batch_data):
if data is None:
return batch_data
elif isinstance(data, np.ndarray):
if isinstance(batch_data, np.ndarray):
if len(data.shape) == 1 and len(batch_data.shape) == 1:
return np.concatenate([data, batch_data])
else:
return np.vstack((data, batch_data))
elif isinstance(data, tuple):
X, y = data
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
X = append_batch(X, batch_X)
y = append_batch(y, batch_y)
return X, y
elif torch_installed and isinstance(data, torch.Tensor):
if isinstance(batch_data, torch.Tensor):
return torch.cat((data, batch_data))
try:
import h5py
        if isinstance(data, h5py.File):
            # Appending to an already-open HDF5 file is not implemented here;
            # this branch only unpacks the batch without writing it anywhere.
            if isinstance(batch_data, tuple):
                batch_X, batch_y = batch_data
except ModuleNotFoundError:
pass
    # TODO: Handle dataframes
def create_data_loader(X, y=None, batch_size=1, num_workers=0, shuffle=True):
"""A function that takes a dataset as input and outputs a Pytorch dataloader.
Parameters
----------
X : Input data.
The formats supported are Pandas DataFrame, Numpy array,
        a sparse matrix, torch.tensor, torch.utils.data.Dataset, path to an HDF5 file,
lale.util.batch_data_dictionary_dataset.BatchDataDict,
a Python dictionary of the format `{"dataset": torch.utils.data.Dataset,
"collate_fn":collate_fn for torch.utils.data.DataLoader}`
y : Labels., optional
Supported formats are Numpy array or Pandas series, by default None
batch_size : int, optional
Number of samples in each batch, by default 1
num_workers : int, optional
Number of workers used by the data loader, by default 0
shuffle: boolean, optional, default True
Whether to use SequentialSampler or RandomSampler for creating batches
Returns
-------
torch.utils.data.DataLoader
Raises
------
TypeError
Raises a TypeError if the input format is not supported.
"""
import torch
from torch.utils.data import DataLoader, Dataset, TensorDataset
from lale.util.batch_data_dictionary_dataset import BatchDataDict
from lale.util.hdf5_to_torch_dataset import HDF5TorchDataset
from lale.util.numpy_torch_dataset import NumpyTorchDataset, numpy_collate_fn
from lale.util.pandas_torch_dataset import PandasTorchDataset, pandas_collate_fn
collate_fn = None
worker_init_fn = None
if isinstance(X, Dataset):
dataset = X
elif isinstance(X, pd.DataFrame):
dataset = PandasTorchDataset(X, y)
collate_fn = pandas_collate_fn
elif isinstance(X, scipy.sparse.csr.csr_matrix):
# unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
X = X.toarray()
if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
y = y.view(np.ndarray)
dataset = NumpyTorchDataset(X, y)
collate_fn = numpy_collate_fn
elif isinstance(X, np.ndarray):
# unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
if isinstance(X, lale.datasets.data_schemas.NDArrayWithSchema):
X = X.view(np.ndarray)
if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
y = y.view(np.ndarray)
dataset = NumpyTorchDataset(X, y)
collate_fn = numpy_collate_fn
elif isinstance(X, str): # Assume that this is path to hdf5 file
dataset = HDF5TorchDataset(X)
elif isinstance(X, BatchDataDict):
dataset = X
def my_collate_fn(batch):
return batch[
0
] # because BatchDataDict's get_item returns a batch, so no collate is required.
return DataLoader(
dataset, batch_size=1, collate_fn=my_collate_fn, shuffle=shuffle
)
elif isinstance(X, dict): # Assumed that it is data indexed by batch number
if "dataset" in X:
dataset = X["dataset"]
collate_fn = X.get("collate_fn", None)
worker_init_fn = getattr(dataset, "worker_init_fn", None)
else:
return [X]
elif isinstance(X, torch.Tensor) and y is not None:
if isinstance(y, np.ndarray):
y = torch.from_numpy(y)
dataset = TensorDataset(X, y)
elif isinstance(X, torch.Tensor):
dataset = TensorDataset(X)
else:
raise TypeError(
"Can not create a data loader for a dataset with type {}".format(type(X))
)
return DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=num_workers,
worker_init_fn=worker_init_fn,
shuffle=shuffle,
)
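# Hedged usage sketch (synthetic data): numpy inputs are routed through
# NumpyTorchDataset with numpy_collate_fn, so iterating the loader yields
# batches of the original arrays.
def _create_data_loader_example():
    X = np.random.rand(10, 4)
    y = np.random.randint(0, 2, size=10)
    loader = create_data_loader(X, y, batch_size=4, shuffle=False)
    return next(iter(loader))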
def write_batch_output_to_file(
file_obj,
file_path,
total_len,
batch_idx,
batch_X,
batch_y,
batch_out_X,
batch_out_y,
):
if file_obj is None and file_path is None:
raise ValueError("Only one of the file object or file path can be None.")
if file_obj is None:
import h5py
file_obj = h5py.File(file_path, "w")
# estimate the size of the dataset based on the first batch output size
transform_ratio = int(len(batch_out_X) / len(batch_X))
if len(batch_out_X.shape) == 1:
h5_data_shape = (transform_ratio * total_len,)
elif len(batch_out_X.shape) == 2:
h5_data_shape = (transform_ratio * total_len, batch_out_X.shape[1])
elif len(batch_out_X.shape) == 3:
h5_data_shape = (
transform_ratio * total_len,
batch_out_X.shape[1],
batch_out_X.shape[2],
)
else:
raise ValueError(
"batch_out_X is expected to be a 1-d, 2-d or 3-d array. Any other data types are not handled."
)
dataset = file_obj.create_dataset(
name="X", shape=h5_data_shape, chunks=True, compression="gzip"
)
if batch_out_y is None and batch_y is not None:
batch_out_y = batch_y
if batch_out_y is not None:
if len(batch_out_y.shape) == 1:
h5_labels_shape = (transform_ratio * total_len,)
elif len(batch_out_y.shape) == 2:
h5_labels_shape = (transform_ratio * total_len, batch_out_y.shape[1])
else:
raise ValueError(
"batch_out_y is expected to be a 1-d or 2-d array. Any other data types are not handled."
)
dataset = file_obj.create_dataset(
name="y", shape=h5_labels_shape, chunks=True, compression="gzip"
)
dataset = file_obj["X"]
dataset[
batch_idx * len(batch_out_X) : (batch_idx + 1) * len(batch_out_X)
] = batch_out_X
if batch_out_y is not None or batch_y is not None:
labels = file_obj["y"]
if batch_out_y is not None:
labels[
batch_idx * len(batch_out_y) : (batch_idx + 1) * len(batch_out_y)
] = batch_out_y
else:
labels[batch_idx * len(batch_y) : (batch_idx + 1) * len(batch_y)] = batch_y
return file_obj
def add_missing_values(orig_X, missing_rate=0.1, seed=None):
# see scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html
n_samples, n_features = orig_X.shape
n_missing_samples = int(n_samples * missing_rate)
if seed is None:
rng = np.random.RandomState()
else:
rng = np.random.RandomState(seed)
missing_samples = np.zeros(n_samples, dtype=bool)
missing_samples[:n_missing_samples] = True
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
missing_X = orig_X.copy()
if isinstance(missing_X, np.ndarray):
missing_X[missing_samples, missing_features] = np.nan
else:
assert isinstance(missing_X, pd.DataFrame)
i_missing_sample = 0
for i_sample in range(n_samples):
if missing_samples[i_sample]:
i_feature = missing_features[i_missing_sample]
i_missing_sample += 1
missing_X.iloc[i_sample, i_feature] = np.nan
return missing_X
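# Illustrative sketch: corrupt 30% of the rows of a small DataFrame with one
# np.nan each, using a fixed seed so the missingness pattern is reproducible.
def _add_missing_values_example():
    orig_X = pd.DataFrame(np.random.rand(10, 2), columns=["a", "b"])
    return add_missing_values(orig_X, missing_rate=0.3, seed=42)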
# helpers for manipulating (extended) sklearn style paths.
# documentation of the path format is part of the operators module docstring
def partition_sklearn_params(
d: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]:
sub_parts: Dict[str, Dict[str, Any]] = {}
main_parts: Dict[str, Any] = {}
for k, v in d.items():
ks = k.split("__", 1)
if len(ks) == 1:
assert k not in main_parts
main_parts[k] = v
else:
assert len(ks) == 2
bucket: Dict[str, Any] = {}
group: str = ks[0]
param: str = ks[1]
if group in sub_parts:
bucket = sub_parts[group]
else:
sub_parts[group] = bucket
assert param not in bucket
bucket[param] = v
return (main_parts, sub_parts)
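# Illustrative sketch: "__"-separated scikit-learn style keys are split into
# top-level params and a per-step dictionary of nested params.
def _partition_sklearn_params_example():
    main, nested = partition_sklearn_params(
        {"memory": None, "pca__n_components": 2, "pca__whiten": False}
    )
    # main == {"memory": None}
    # nested == {"pca": {"n_components": 2, "whiten": False}}
    return main, nested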
def partition_sklearn_choice_params(d: Dict[str, Any]) -> Tuple[int, Dict[str, Any]]:
discriminant_value: int = -1
choice_parts: Dict[str, Any] = {}
for k, v in d.items():
if k == discriminant_name:
assert discriminant_value == -1
discriminant_value = int(v)
else:
k_rest = unnest_choice(k)
choice_parts[k_rest] = v
assert discriminant_value != -1
return (discriminant_value, choice_parts)
DUMMY_SEARCH_SPACE_GRID_PARAM_NAME: str = "$"
discriminant_name: str = "?"
choice_prefix: str = "?"
structure_type_name: str = "#"
structure_type_list: str = "list"
structure_type_tuple: str = "tuple"
structure_type_dict: str = "dict"
def get_name_and_index(name: str) -> Tuple[str, int]:
"""given a name of the form "name@i", returns (name, i)
if given a name of the form "name", returns (name, 0)
"""
splits = name.split("@", 1)
if len(splits) == 1:
return splits[0], 0
else:
return splits[0], int(splits[1])
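# Illustrative sketch of the "name@i" convention documented above.
def _get_name_and_index_example():
    assert get_name_and_index("pca@2") == ("pca", 2)
    assert get_name_and_index("pca") == ("pca", 0)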
def make_degen_indexed_name(name, index):
return f"{name}@{index}"
def make_indexed_name(name, index):
if index == 0:
return name
else:
return f"{name}@{index}"
def make_array_index_name(index, is_tuple: bool = False):
sep = "##" if is_tuple else "#"
return f"{sep}{str(index)}"
def is_numeric_structure(structure_type: str):
if structure_type == "list" or structure_type == "tuple":
return True
elif structure_type == "dict":
return False
else:
assert False, f"Unknown structure type {structure_type} found"
V = TypeVar("V")
def nest_HPparam(name: str, key: str):
if key == DUMMY_SEARCH_SPACE_GRID_PARAM_NAME:
# we can get rid of the dummy now, since we have a name for it
return name
return name + "__" + key
def nest_HPparams(name: str, grid: Mapping[str, V]) -> Dict[str, V]:
return {(nest_HPparam(name, k)): v for k, v in grid.items()}
def nest_all_HPparams(
name: str, grids: Iterable[Mapping[str, V]]
) -> List[Dict[str, V]]:
"""Given the name of an operator in a pipeline, this transforms every key(parameter name) in the grids
to use the operator name as a prefix (separated by __). This is the convention in scikit-learn pipelines.
"""
return [nest_HPparams(name, grid) for grid in grids]
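# Illustrative sketch: each grid key gets the operator name as a "__" prefix,
# matching the scikit-learn convention described in the docstring above.
def _nest_all_HPparams_example():
    grids = [{"n_components": 2}, {"whiten": True}]
    return nest_all_HPparams("pca", grids)
    # -> [{"pca__n_components": 2}, {"pca__whiten": True}]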
def nest_choice_HPparam(key: str):
return choice_prefix + key
def nest_choice_HPparams(grid: Mapping[str, V]) -> Dict[str, V]:
return {(nest_choice_HPparam(k)): v for k, v in grid.items()}
def nest_choice_all_HPparams(grids: Iterable[Mapping[str, V]]) -> List[Dict[str, V]]:
"""this transforms every key(parameter name) in the grids
to be nested under a choice, using a ? as a prefix (separated by __). This is the convention in scikit-learn pipelines.
"""
return [nest_choice_HPparams(grid) for grid in grids]
def unnest_choice(k: str) -> str:
assert k.startswith(choice_prefix)
return k[len(choice_prefix) :]
def unnest_HPparams(k: str) -> List[str]:
return k.split("__")
def are_hyperparameters_equal(hyperparam1, hyperparam2):
if isinstance(
hyperparam1, np.ndarray
): # hyperparam2 is from schema default, so it may not always be an array
return np.all(hyperparam1 == hyperparam2)
else:
return hyperparam1 == hyperparam2
def _is_ast_subscript(expr):
return isinstance(expr, ast.Subscript)
def _is_ast_attribute(expr):
return isinstance(expr, ast.Attribute)
def _is_ast_constant(expr):
return isinstance(expr, ast.Constant)
def _is_ast_subs_or_attr(expr):
return isinstance(expr, ast.Subscript) or isinstance(expr, ast.Attribute)
def _is_ast_call(expr):
return isinstance(expr, ast.Call)
def _is_ast_name(expr):
return isinstance(expr, ast.Name)
def _ast_func_id(expr):
if isinstance(expr, ast.Name):
return expr.id
else:
raise ValueError("function name expected")
def _is_df(df):
return _is_pandas_df(df) or _is_spark_df(df)
def _is_pandas_series(df):
return isinstance(df, pd.Series)
def _is_pandas_df(df):
return isinstance(df, pd.DataFrame)
def _is_spark_df(df):
if spark_installed:
return isinstance(df, spark_df)
else:
return False
def _is_spark_with_index(df):
if spark_installed:
return isinstance(df, lale.datasets.data_schemas.SparkDataFrameWithIndex)
else:
return False
def _ensure_pandas(df) -> pd.DataFrame:
if _is_spark_df(df):
return df.toPandas()
assert _is_pandas_df(df), type(df)
return df
def _get_subscript_value(subscript_expr):
if isinstance(subscript_expr.slice, ast.Constant): # for Python 3.9
subscript_value = subscript_expr.slice.value
else:
subscript_value = subscript_expr.slice.value.s # type: ignore
return subscript_value
class GenSym:
def __init__(self, names: Set[str]):
self._names = names
def __call__(self, prefix):
if prefix in self._names:
suffix = 0
while True:
result = f"{prefix}_{suffix}"
if result not in self._names:
break
suffix += 1
else:
result = prefix
self._names |= {result}
return result
| [
"torch.cat",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.utils.data.TensorDataset"
] | 1.0 | ksrinivs64/lale | e0ffc357c3711940078718717aebc5b06c9dc4ae |
1.8 | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
from torch import nn
import torch.nn.functional as F
class NonLocalModule(nn.Module):
def __init__(self, in_channels, embed_dim=None, embed_factor=4, spatial_sub_sample=False):
super().__init__()
assert embed_factor >= 1
self.embed_dim = embed_dim if embed_dim is not None else in_channels // embed_factor
self.theta = self._conv_1x1(in_channels, self.embed_dim)
self.phi = nn.Sequential(
nn.MaxPool2d(kernel_size=(2, 2)) if spatial_sub_sample else nn.Sequential(),
self._conv_1x1(in_channels, self.embed_dim))
self.g = nn.Sequential(
nn.MaxPool2d(kernel_size=(2, 2)) if spatial_sub_sample else nn.Sequential(),
self._conv_1x1(in_channels, self.embed_dim))
self.W = nn.Sequential(
self._conv_1x1(self.embed_dim, in_channels),
nn.BatchNorm2d(in_channels)
)
self._init_params()
@staticmethod
def _conv_1x1(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels)
)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
theta = self.theta(x)
phi = self.phi(x)
g = self.g(x)
theta = theta.view(theta.shape[:2] + (-1,))
phi = phi.view(phi.shape[:2] + (-1,))
g = g.view(g.shape[:2] + (-1,))
theta_phi = torch.matmul(theta.transpose(1, 2), phi)
attention = F.softmax(theta_phi, dim=2)
y = torch.matmul(g, attention)
y = y.view(y.shape[:2] + x.shape[2:])
out = self.W(y) + x
return out
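# Hedged usage sketch (synthetic input): with default arguments the block is
# shape-preserving, so an (N, C, H, W) feature map comes back with the same
# shape after the attention step and the residual connection.
def _non_local_module_example() -> torch.Tensor:
    module = NonLocalModule(in_channels=64)
    x = torch.rand(2, 64, 32, 32)
    return module(x)  # shape (2, 64, 32, 32)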
| [
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.matmul"
] | 1.8 | ricklentz/deep-object-reid | bf4d30d78e4a34847496d0efb50d98541f5274f9 |
1.4 | import argparse
import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from transformers import AutoTokenizer, get_linear_schedule_with_warmup
import OpenMatch as om
from transformers import AdamW
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from utils import is_first_worker, DistributedEvalSampler, merge_resfile, set_dist_args, optimizer_to
from contextlib import nullcontext # from contextlib import suppress as nullcontext # for python < 3.7
torch.multiprocessing.set_sharing_strategy('file_system')
import logging
import random
import numpy as np
logger = logging.getLogger(__name__)
# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter(log_dir='logs')
def dev(args, model, metric, dev_loader, device):
rst_dict = {}
for dev_batch in dev_loader:
query_id, doc_id, label, retrieval_score = dev_batch['query_id'], dev_batch['doc_id'], dev_batch['label'], dev_batch['retrieval_score']
with torch.no_grad():
if args.model == 'bert':
batch_score, _ = model(dev_batch['input_ids'].to(device), dev_batch['input_mask'].to(device), dev_batch['segment_ids'].to(device))
elif args.model == 'roberta':
batch_score, _ = model(dev_batch['input_ids'].to(device), dev_batch['input_mask'].to(device))
elif args.model == 'edrm':
batch_score, _ = model(dev_batch['query_wrd_idx'].to(device), dev_batch['query_wrd_mask'].to(device),
dev_batch['doc_wrd_idx'].to(device), dev_batch['doc_wrd_mask'].to(device),
dev_batch['query_ent_idx'].to(device), dev_batch['query_ent_mask'].to(device),
dev_batch['doc_ent_idx'].to(device), dev_batch['doc_ent_mask'].to(device),
dev_batch['query_des_idx'].to(device), dev_batch['doc_des_idx'].to(device))
else:
batch_score, _ = model(dev_batch['query_idx'].to(device), dev_batch['query_mask'].to(device),
dev_batch['doc_idx'].to(device), dev_batch['doc_mask'].to(device))
if args.task == 'classification':
batch_score = batch_score.softmax(dim=-1)[:, 1].squeeze(-1)
batch_score = batch_score.detach().cpu().tolist()
for (q_id, d_id, b_s, l) in zip(query_id, doc_id, batch_score, label):
if q_id not in rst_dict:
rst_dict[q_id] = {}
if d_id not in rst_dict[q_id] or b_s > rst_dict[q_id][d_id][0]:
rst_dict[q_id][d_id] = [b_s, l]
return rst_dict
def train_reinfoselect(args, model, policy, loss_fn, m_optim, m_scheduler, p_optim, metric, train_loader, dev_loader, device):
best_mes = 0.0
with torch.no_grad():
rst_dict = dev(args, model, metric, dev_loader, device)
om.utils.save_trec(args.res, rst_dict)
if args.metric.split('_')[0] == 'mrr':
mes = metric.get_mrr(args.qrels, args.res, args.metric)
else:
mes = metric.get_metric(args.qrels, args.res, args.metric)
if mes >= best_mes:
best_mes = mes
print('save_model...')
if args.n_gpu > 1:
torch.save(model.module.state_dict(), args.save)
else:
torch.save(model.state_dict(), args.save)
print('initial result: ', mes)
last_mes = mes
for epoch in range(args.epoch):
avg_loss = 0.0
log_prob_ps = []
log_prob_ns = []
for step, train_batch in enumerate(train_loader):
if args.model == 'bert':
if args.task == 'ranking':
batch_probs, _ = policy(train_batch['input_ids_pos'].to(device), train_batch['input_mask_pos'].to(device), train_batch['segment_ids_pos'].to(device))
elif args.task == 'classification':
batch_probs, _ = policy(train_batch['input_ids'].to(device), train_batch['input_mask'].to(device), train_batch['segment_ids'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'roberta':
if args.task == 'ranking':
batch_probs, _ = policy(train_batch['input_ids_pos'].to(device), train_batch['input_mask_pos'].to(device))
elif args.task == 'classification':
batch_probs, _ = policy(train_batch['input_ids'].to(device), train_batch['input_mask'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'edrm':
if args.task == 'ranking':
batch_probs, _ = policy(train_batch['query_wrd_idx'].to(device), train_batch['query_wrd_mask'].to(device),
train_batch['doc_pos_wrd_idx'].to(device), train_batch['doc_pos_wrd_mask'].to(device))
elif args.task == 'classification':
batch_probs, _ = policy(train_batch['query_wrd_idx'].to(device), train_batch['query_wrd_mask'].to(device),
train_batch['doc_wrd_idx'].to(device), train_batch['doc_wrd_mask'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
else:
if args.task == 'ranking':
batch_probs, _ = policy(train_batch['query_idx'].to(device), train_batch['query_mask'].to(device),
train_batch['doc_pos_idx'].to(device), train_batch['doc_pos_mask'].to(device))
elif args.task == 'classification':
batch_probs, _ = policy(train_batch['query_idx'].to(device), train_batch['query_mask'].to(device),
train_batch['doc_idx'].to(device), train_batch['doc_mask'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
batch_probs = F.gumbel_softmax(batch_probs, tau=args.tau)
m = Categorical(batch_probs)
action = m.sample()
if action.sum().item() < 1:
#m_scheduler.step()
if (step+1) % args.eval_every == 0 and len(log_prob_ps) > 0:
with torch.no_grad():
rst_dict = dev(args, model, metric, dev_loader, device)
om.utils.save_trec(args.res, rst_dict)
if args.metric.split('_')[0] == 'mrr':
mes = metric.get_mrr(args.qrels, args.res, args.metric)
else:
mes = metric.get_metric(args.qrels, args.res, args.metric)
if mes >= best_mes:
best_mes = mes
print('save_model...')
if args.n_gpu > 1:
torch.save(model.module.state_dict(), args.save)
else:
torch.save(model.state_dict(), args.save)
print(step+1, avg_loss/len(log_prob_ps), mes, best_mes)
avg_loss = 0.0
reward = mes - last_mes
last_mes = mes
if reward >= 0:
policy_loss = [(-log_prob_p * reward).sum().unsqueeze(-1) for log_prob_p in log_prob_ps]
else:
policy_loss = [(log_prob_n * reward).sum().unsqueeze(-1) for log_prob_n in log_prob_ns]
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
p_optim.step()
p_optim.zero_grad()
if args.reset:
state_dict = torch.load(args.save)
model.load_state_dict(state_dict)
last_mes = best_mes
log_prob_ps = []
log_prob_ns = []
continue
filt = action.nonzero().squeeze(-1).cpu()
if args.model == 'bert':
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['input_ids_pos'].index_select(0, filt).to(device),
train_batch['input_mask_pos'].index_select(0, filt).to(device),
train_batch['segment_ids_pos'].index_select(0, filt).to(device))
batch_score_neg, _ = model(train_batch['input_ids_neg'].index_select(0, filt).to(device),
train_batch['input_mask_neg'].index_select(0, filt).to(device),
train_batch['segment_ids_neg'].index_select(0, filt).to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['input_ids'].index_select(0, filt).to(device),
train_batch['input_mask'].index_select(0, filt).to(device),
train_batch['segment_ids'].index_select(0, filt).to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'roberta':
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['input_ids_pos'].index_select(0, filt).to(device), train_batch['input_mask_pos'].index_select(0, filt).to(device))
batch_score_neg, _ = model(train_batch['input_ids_neg'].index_select(0, filt).to(device), train_batch['input_mask_neg'].index_select(0, filt).to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['input_ids'].index_select(0, filt).to(device), train_batch['input_mask'].index_select(0, filt).to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'edrm':
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['query_wrd_idx'].index_select(0, filt).to(device), train_batch['query_wrd_mask'].index_select(0, filt).to(device),
train_batch['doc_pos_wrd_idx'].index_select(0, filt).to(device), train_batch['doc_pos_wrd_mask'].index_select(0, filt).to(device),
train_batch['query_ent_idx'].index_select(0, filt).to(device), train_batch['query_ent_mask'].index_select(0, filt).to(device),
train_batch['doc_pos_ent_idx'].index_select(0, filt).to(device), train_batch['doc_pos_ent_mask'].index_select(0, filt).to(device),
train_batch['query_des_idx'].index_select(0, filt).to(device), train_batch['doc_pos_des_idx'].index_select(0, filt).to(device))
batch_score_neg, _ = model(train_batch['query_wrd_idx'].index_select(0, filt).to(device), train_batch['query_wrd_mask'].index_select(0, filt).to(device),
train_batch['doc_neg_wrd_idx'].index_select(0, filt).to(device), train_batch['doc_neg_wrd_mask'].index_select(0, filt).to(device),
train_batch['query_ent_idx'].index_select(0, filt).to(device), train_batch['query_ent_mask'].index_select(0, filt).to(device),
train_batch['doc_neg_ent_idx'].index_select(0, filt).to(device), train_batch['doc_neg_ent_mask'].index_select(0, filt).to(device),
train_batch['query_des_idx'].index_select(0, filt).to(device), train_batch['doc_neg_des_idx'].index_select(0, filt).to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['query_wrd_idx'].index_select(0, filt).to(device), train_batch['query_wrd_mask'].index_select(0, filt).to(device),
train_batch['doc_wrd_idx'].index_select(0, filt).to(device), train_batch['doc_wrd_mask'].index_select(0, filt).to(device),
train_batch['query_ent_idx'].index_select(0, filt).to(device), train_batch['query_ent_mask'].index_select(0, filt).to(device),
train_batch['doc_ent_idx'].index_select(0, filt).to(device), train_batch['doc_ent_mask'].index_select(0, filt).to(device),
train_batch['query_des_idx'].index_select(0, filt).to(device), train_batch['doc_des_idx'].index_select(0, filt).to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
else:
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['query_idx'].index_select(0, filt).to(device), train_batch['query_mask'].index_select(0, filt).to(device),
train_batch['doc_pos_idx'].index_select(0, filt).to(device), train_batch['doc_pos_mask'].index_select(0, filt).to(device))
batch_score_neg, _ = model(train_batch['query_idx'].index_select(0, filt).to(device), train_batch['query_mask'].index_select(0, filt).to(device),
train_batch['doc_neg_idx'].index_select(0, filt).to(device), train_batch['doc_neg_mask'].index_select(0, filt).to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['query_idx'].index_select(0, filt).to(device), train_batch['query_mask'].index_select(0, filt).to(device),
train_batch['doc_idx'].index_select(0, filt).to(device), train_batch['doc_mask'].index_select(0, filt).to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
mask = action.ge(0.5)
log_prob_p = m.log_prob(action)
log_prob_n = m.log_prob(1-action)
log_prob_ps.append(torch.masked_select(log_prob_p, mask))
log_prob_ns.append(torch.masked_select(log_prob_n, mask))
if args.task == 'ranking':
batch_loss = loss_fn(batch_score_pos.tanh(), batch_score_neg.tanh(), torch.ones(batch_score_pos.size()).to(device))
elif args.task == 'classification':
batch_loss = loss_fn(batch_score, train_batch['label'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
if args.n_gpu > 1:
batch_loss = batch_loss.mean(-1)
batch_loss = batch_loss.mean()
avg_loss += batch_loss.item()
batch_loss.backward()
m_optim.step()
m_scheduler.step()
m_optim.zero_grad()
if (step+1) % args.eval_every == 0:
with torch.no_grad():
rst_dict = dev(args, model, metric, dev_loader, device)
om.utils.save_trec(args.res, rst_dict)
if args.metric.split('_')[0] == 'mrr':
mes = metric.get_mrr(args.qrels, args.res, args.metric)
else:
mes = metric.get_metric(args.qrels, args.res, args.metric)
if mes >= best_mes:
best_mes = mes
print('save_model...')
if args.n_gpu > 1:
torch.save(model.module.state_dict(), args.save)
else:
torch.save(model.state_dict(), args.save)
print(step+1, avg_loss/len(log_prob_ps), mes, best_mes)
avg_loss = 0.0
reward = mes - last_mes
last_mes = mes
if reward >= 0:
policy_loss = [(-log_prob_p * reward).sum().unsqueeze(-1) for log_prob_p in log_prob_ps]
else:
policy_loss = [(log_prob_n * reward).sum().unsqueeze(-1) for log_prob_n in log_prob_ns]
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
p_optim.step()
p_optim.zero_grad()
if args.reset:
state_dict = torch.load(args.save)
model.load_state_dict(state_dict)
last_mes = best_mes
log_prob_ps = []
log_prob_ns = []
def train(args, model, loss_fn, m_optim, m_scheduler, metric, train_loader, dev_loader, device, train_sampler=None):
best_mes = 0.0
    global_step = 0  # step counter accumulated across epochs
for epoch in range(args.epoch):
if args.local_rank != -1:
train_sampler.set_epoch(epoch) # shuffle data for distributed
logger.warning("current gpu local_rank {}".format(args.local_rank))
avg_loss = 0.0
for step, train_batch in enumerate(train_loader):
sync_context = model.no_sync if (args.local_rank != -1 and (step+1) % args.gradient_accumulation_steps != 0) else nullcontext
if args.model == 'bert':
if args.task == 'ranking':
# sync gradients only at gradient accumulation step
with sync_context():
batch_score_pos, _ = model(train_batch['input_ids_pos'].to(device), train_batch['input_mask_pos'].to(device), train_batch['segment_ids_pos'].to(device))
batch_score_neg, _ = model(train_batch['input_ids_neg'].to(device), train_batch['input_mask_neg'].to(device), train_batch['segment_ids_neg'].to(device))
elif args.task == 'classification':
with sync_context():
batch_score, _ = model(train_batch['input_ids'].to(device), train_batch['input_mask'].to(device), train_batch['segment_ids'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'roberta':
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['input_ids_pos'].to(device), train_batch['input_mask_pos'].to(device))
batch_score_neg, _ = model(train_batch['input_ids_neg'].to(device), train_batch['input_mask_neg'].to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['input_ids'].to(device), train_batch['input_mask'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
elif args.model == 'edrm':
if args.task == 'ranking':
batch_score_pos, _ = model(train_batch['query_wrd_idx'].to(device), train_batch['query_wrd_mask'].to(device),
train_batch['doc_pos_wrd_idx'].to(device), train_batch['doc_pos_wrd_mask'].to(device),
train_batch['query_ent_idx'].to(device), train_batch['query_ent_mask'].to(device),
train_batch['doc_pos_ent_idx'].to(device), train_batch['doc_pos_ent_mask'].to(device),
train_batch['query_des_idx'].to(device), train_batch['doc_pos_des_idx'].to(device))
batch_score_neg, _ = model(train_batch['query_wrd_idx'].to(device), train_batch['query_wrd_mask'].to(device),
train_batch['doc_neg_wrd_idx'].to(device), train_batch['doc_neg_wrd_mask'].to(device),
train_batch['query_ent_idx'].to(device), train_batch['query_ent_mask'].to(device),
train_batch['doc_neg_ent_idx'].to(device), train_batch['doc_neg_ent_mask'].to(device),
train_batch['query_des_idx'].to(device), train_batch['doc_neg_des_idx'].to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['query_wrd_idx'].to(device), train_batch['query_wrd_mask'].to(device),
train_batch['doc_wrd_idx'].to(device), train_batch['doc_wrd_mask'].to(device),
train_batch['query_ent_idx'].to(device), train_batch['query_ent_mask'].to(device),
train_batch['doc_ent_idx'].to(device), train_batch['doc_ent_mask'].to(device),
train_batch['query_des_idx'].to(device), train_batch['doc_des_idx'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
else:
if args.task == 'ranking':
with sync_context():
batch_score_pos, _ = model(train_batch['query_idx'].to(device), train_batch['query_mask'].to(device),
train_batch['doc_pos_idx'].to(device), train_batch['doc_pos_mask'].to(device))
batch_score_neg, _ = model(train_batch['query_idx'].to(device), train_batch['query_mask'].to(device),
train_batch['doc_neg_idx'].to(device), train_batch['doc_neg_mask'].to(device))
elif args.task == 'classification':
batch_score, _ = model(train_batch['query_idx'].to(device), train_batch['query_mask'].to(device),
train_batch['doc_idx'].to(device), train_batch['doc_mask'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
if args.task == 'ranking':
with sync_context():
if args.ranking_loss == 'margin_loss':
batch_loss = loss_fn(batch_score_pos.tanh(), batch_score_neg.tanh(), torch.ones(batch_score_pos.size()).to(device))
elif args.ranking_loss == 'CE_loss':
batch_loss = loss_fn(torch.sigmoid(batch_score_pos-batch_score_neg),torch.ones(batch_score_neg.size()).to(device))
elif args.ranking_loss == 'triplet_loss':
logit_matrix = torch.cat([batch_score_pos.reshape([-1,1]),batch_score_neg.reshape([-1,1])], dim=1)
lsm = F.log_softmax(input=logit_matrix,dim=1)
batch_loss = torch.mean(-1.0 * lsm[:, 0])
elif args.ranking_loss == 'LCE_loss':
pass
elif args.task == 'classification':
with sync_context():
batch_loss = loss_fn(batch_score, train_batch['label'].to(device))
else:
raise ValueError('Task must be `ranking` or `classification`.')
if args.n_gpu > 1:
batch_loss = batch_loss.mean()
if args.gradient_accumulation_steps > 1:
batch_loss = batch_loss / args.gradient_accumulation_steps
avg_loss += batch_loss.item()
with sync_context():
batch_loss.backward()
# if args.local_rank != -1:
# if (step+1) % args.gradient_accumulation_steps == 0:
# batch_loss.backward()
# else:
# with model.no_sync():
# batch_loss.backward()
# else:
# batch_loss.backward()
if (step+1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
m_optim.step()
m_scheduler.step()
m_optim.zero_grad()
global_step += 1
if args.logging_step > 0 and ((global_step+1) % args.logging_step == 0 or (args.test_init_log and global_step==0)):
# if is_first_worker():
if args.local_rank in [-1,0]:
logger.info("training gpu {}, global step: {}, local step: {}, loss: {}".format(args.local_rank, global_step+1, step+1, avg_loss/args.logging_step))
# writer.add_scalar('avg_loss',avg_loss/args.logging_step, step)
# writer.add_scalar('dev', mes, step)
avg_loss = 0.0
if (global_step+1) % args.eval_every == 0 or (args.test_init_log and global_step==0):
model.eval()
with torch.no_grad():
rst_dict = dev(args, model, metric, dev_loader, device)
model.train()
if args.local_rank != -1:
# distributed mode, save dicts and merge
om.utils.save_trec(args.res + "_rank_{:03}".format(args.local_rank), rst_dict)
dist.barrier()
# if is_first_worker():
if args.local_rank in [-1,0]:
merge_resfile(args.res + "_rank_*", args.res)
else:
om.utils.save_trec(args.res, rst_dict)
# if is_first_worker():
if args.local_rank in [-1,0]:
if args.metric.split('_')[0] == 'mrr':
mes = metric.get_mrr(args.qrels, args.res, args.metric)
else:
mes = metric.get_metric(args.qrels, args.res, args.metric)
best_mes = mes if mes >= best_mes else best_mes
logger.info( 'save_model at step {}'.format(global_step+1))
if args.n_gpu > 1:
torch.save(model.module.state_dict(), args.save + "_step-{}".format(global_step+1))
else:
torch.save(model.state_dict(), args.save + "_step-{}".format(global_step+1))
logger.info("global step: {}, measure: {}, best measure: {}".format(global_step+1, mes, best_mes))
# dist.barrier()
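# Illustrative sketch (not part of the original file) of the gradient-accumulation pattern
# used in train(): each micro-batch loss is divided by the number of accumulation steps and
# the optimizer only steps every `accum_steps` batches. Model and data are toy stand-ins.
def _grad_accum_demo(accum_steps=4):
    import torch
    toy_model = torch.nn.Linear(8, 1)
    toy_optim = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    for step in range(2 * accum_steps):
        loss = toy_model(torch.randn(2, 8)).mean() / accum_steps
        loss.backward()
        if (step + 1) % accum_steps == 0:
            torch.nn.utils.clip_grad_norm_(toy_model.parameters(), 1.0)
            toy_optim.step()
            toy_optim.zero_grad()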
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-task', type=str, default='ranking')
parser.add_argument('-ranking_loss', type=str, default='margin_loss')
parser.add_argument('-model', type=str, default='bert')
parser.add_argument('-optimizer', type=str, default='adam')
parser.add_argument('-reinfoselect', action='store_true', default=False)
parser.add_argument('-reset', action='store_true', default=False)
parser.add_argument('-train', action=om.utils.DictOrStr, default='./data/train_toy.jsonl')
parser.add_argument('-max_input', type=int, default=1280000)
parser.add_argument('-save', type=str, default='./checkpoints/bert.bin')
parser.add_argument('-dev', action=om.utils.DictOrStr, default='./data/dev_toy.jsonl')
parser.add_argument('-qrels', type=str, default='./data/qrels_toy')
parser.add_argument('-vocab', type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument('-ent_vocab', type=str, default='')
parser.add_argument('-pretrain', type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument('-checkpoint', type=str, default=None)
parser.add_argument('-res', type=str, default='./results/bert.trec')
parser.add_argument('-metric', type=str, default='ndcg_cut_10')
parser.add_argument('-mode', type=str, default='cls')
parser.add_argument('-n_kernels', type=int, default=21)
parser.add_argument('-max_query_len', type=int, default=20)
parser.add_argument('-max_doc_len', type=int, default=150)
parser.add_argument('-maxp', action='store_true', default=False)
parser.add_argument('-epoch', type=int, default=1)
parser.add_argument('-batch_size', type=int, default=8)
parser.add_argument('-dev_eval_batch_size', type=int, default=128)
parser.add_argument('-lr', type=float, default=2e-5)
parser.add_argument('-tau', type=float, default=1)
parser.add_argument('-n_warmup_steps', type=int, default=1000)
parser.add_argument('-gradient_accumulation_steps', type=int, default=4)
parser.add_argument("-max_grad_norm", default=1.0,type=float,help="Max gradient norm.",)
parser.add_argument('-eval_every', type=int, default=1000)
parser.add_argument('-logging_step', type=int, default=100)
parser.add_argument('-test_init_log', action='store_true', default=False)
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--local_rank', type=int, default=-1) # for distributed mode
parser.add_argument( "--server_ip",type=str,default="", help="For distant debugging.",)
parser.add_argument( "--server_port",type=str, default="",help="For distant debugging.",)
args = parser.parse_args()
set_dist_args(args) # get local cpu/gpu device
args.model = args.model.lower()
if args.model == 'bert':
tokenizer = AutoTokenizer.from_pretrained(args.vocab)
logger.info('reading training data...')
if args.maxp:
train_set = om.data.datasets.BertMaxPDataset(
dataset=args.train,
tokenizer=tokenizer,
mode='train',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
else:
train_set = om.data.datasets.BertDataset(
dataset=args.train,
tokenizer=tokenizer,
mode='train',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
logger.info('reading dev data...')
if args.maxp:
dev_set = om.data.datasets.BertMaxPDataset(
dataset=args.dev,
tokenizer=tokenizer,
mode='dev',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
else:
dev_set = om.data.datasets.BertDataset(
dataset=args.dev,
tokenizer=tokenizer,
mode='dev',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
elif args.model == 'roberta':
tokenizer = AutoTokenizer.from_pretrained(args.vocab)
print('reading training data...')
train_set = om.data.datasets.RobertaDataset(
dataset=args.train,
tokenizer=tokenizer,
mode='train',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
print('reading dev data...')
dev_set = om.data.datasets.RobertaDataset(
dataset=args.dev,
tokenizer=tokenizer,
mode='dev',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
elif args.model == 'edrm':
tokenizer = om.data.tokenizers.WordTokenizer(
pretrained=args.vocab
)
ent_tokenizer = om.data.tokenizers.WordTokenizer(
vocab=args.ent_vocab
)
print('reading training data...')
train_set = om.data.datasets.EDRMDataset(
dataset=args.train,
wrd_tokenizer=tokenizer,
ent_tokenizer=ent_tokenizer,
mode='train',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
des_max_len=20,
max_ent_num=3,
max_input=args.max_input,
task=args.task
)
print('reading dev data...')
dev_set = om.data.datasets.EDRMDataset(
dataset=args.dev,
wrd_tokenizer=tokenizer,
ent_tokenizer=ent_tokenizer,
mode='dev',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
des_max_len=20,
max_ent_num=3,
max_input=args.max_input,
task=args.task
)
else:
tokenizer = om.data.tokenizers.WordTokenizer(
pretrained=args.vocab
)
print('reading training data...')
train_set = om.data.datasets.Dataset(
dataset=args.train,
tokenizer=tokenizer,
mode='train',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
print('reading dev data...')
dev_set = om.data.datasets.Dataset(
dataset=args.dev,
tokenizer=tokenizer,
mode='dev',
query_max_len=args.max_query_len,
doc_max_len=args.max_doc_len,
max_input=args.max_input,
task=args.task
)
if args.local_rank != -1:
# train_sampler = DistributedSampler(train_set, args.world_size, args.local_rank)
train_sampler = DistributedSampler(train_set)
train_loader = om.data.DataLoader(
dataset=train_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=1,
sampler=train_sampler
)
#dev_sampler = DistributedSampler(dev_set)
dev_sampler = DistributedEvalSampler(dev_set)
dev_loader = om.data.DataLoader(
dataset=dev_set,
batch_size=args.batch_size * 16 if args.dev_eval_batch_size <= 0 else args.dev_eval_batch_size,
shuffle=False,
num_workers=1,
sampler=dev_sampler
)
dist.barrier()
else:
train_loader = om.data.DataLoader(
dataset=train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=8
)
dev_loader = om.data.DataLoader(
dataset=dev_set,
batch_size=args.batch_size * 16,
shuffle=False,
num_workers=8
)
train_sampler = None
if args.model == 'bert' or args.model == 'roberta':
if args.maxp:
model = om.models.BertMaxP(
pretrained=args.pretrain,
max_query_len=args.max_query_len,
max_doc_len=args.max_doc_len,
mode=args.mode,
task=args.task
)
else:
model = om.models.Bert(
pretrained=args.pretrain,
mode=args.mode,
task=args.task
)
if args.reinfoselect:
policy = om.models.Bert(
pretrained=args.pretrain,
mode=args.mode,
task='classification'
)
elif args.model == 'edrm':
model = om.models.EDRM(
wrd_vocab_size=tokenizer.get_vocab_size(),
ent_vocab_size=ent_tokenizer.get_vocab_size(),
wrd_embed_dim=tokenizer.get_embed_dim(),
ent_embed_dim=128,
max_des_len=20,
max_ent_num=3,
kernel_num=args.n_kernels,
kernel_dim=128,
kernel_sizes=[1, 2, 3],
wrd_embed_matrix=tokenizer.get_embed_matrix(),
ent_embed_matrix=None,
task=args.task
)
elif args.model == 'tk':
model = om.models.TK(
vocab_size=tokenizer.get_vocab_size(),
embed_dim=tokenizer.get_embed_dim(),
head_num=10,
hidden_dim=100,
layer_num=2,
kernel_num=args.n_kernels,
dropout=0.0,
embed_matrix=tokenizer.get_embed_matrix(),
task=args.task
)
elif args.model == 'cknrm':
model = om.models.ConvKNRM(
vocab_size=tokenizer.get_vocab_size(),
embed_dim=tokenizer.get_embed_dim(),
kernel_num=args.n_kernels,
kernel_dim=128,
kernel_sizes=[1, 2, 3],
embed_matrix=tokenizer.get_embed_matrix(),
task=args.task
)
elif args.model == 'knrm':
model = om.models.KNRM(
vocab_size=tokenizer.get_vocab_size(),
embed_dim=tokenizer.get_embed_dim(),
kernel_num=args.n_kernels,
embed_matrix=tokenizer.get_embed_matrix(),
task=args.task
)
else:
raise ValueError('model name error.')
if args.reinfoselect and args.model != 'bert':
policy = om.models.ConvKNRM(
vocab_size=tokenizer.get_vocab_size(),
embed_dim=tokenizer.get_embed_dim(),
kernel_num=args.n_kernels,
kernel_dim=128,
kernel_sizes=[1, 2, 3],
embed_matrix=tokenizer.get_embed_matrix(),
task='classification'
)
if args.checkpoint is not None:
state_dict = torch.load(args.checkpoint)
if args.model == 'bert':
st = {}
for k in state_dict:
if k.startswith('bert'):
st['_model'+k[len('bert'):]] = state_dict[k]
elif k.startswith('classifier'):
st['_dense'+k[len('classifier'):]] = state_dict[k]
else:
st[k] = state_dict[k]
model.load_state_dict(st)
else:
model.load_state_dict(state_dict)
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = args.device
if args.reinfoselect:
if args.task == 'ranking':
loss_fn = nn.MarginRankingLoss(margin=1, reduction='none')
elif args.task == 'classification':
loss_fn = nn.CrossEntropyLoss(reduction='none')
else:
raise ValueError('Task must be `ranking` or `classification`.')
else:
if args.task == 'ranking':
if args.ranking_loss == 'margin_loss':
loss_fn = nn.MarginRankingLoss(margin=1)
elif args.ranking_loss == 'CE_loss':
loss_fn = nn.BCELoss()
elif args.ranking_loss == 'triplet_loss':
loss_fn = nn.BCELoss() # dummy placeholder; the triplet loss is computed directly in train()
# loss_fn = F.log_softmax(dim=1)
elif args.ranking_loss == 'LCE_loss':
print("LCE loss TODO")
# nn.CrossEntropyLoss()
elif args.task == 'classification':
loss_fn = nn.CrossEntropyLoss()
else:
raise ValueError('Task must be `ranking` or `classification`.')
model.to(device)
if args.reinfoselect:
policy.to(device)
loss_fn.to(device)
if args.n_gpu > 1:
model = nn.DataParallel(model)
loss_fn = nn.DataParallel(loss_fn)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[
args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
dist.barrier()
model.zero_grad()
model.train()
if args.optimizer.lower() == 'adam':
m_optim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
elif args.optimizer.lower() == 'adamw':
m_optim = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
if args.local_rank == -1:
m_scheduler = get_linear_schedule_with_warmup(m_optim, num_warmup_steps=args.n_warmup_steps, num_training_steps=len(train_set)*args.epoch//args.batch_size)
else:
m_scheduler = get_linear_schedule_with_warmup(m_optim, num_warmup_steps=args.n_warmup_steps, num_training_steps=len(train_set)*args.epoch//(args.batch_size*args.world_size*args.gradient_accumulation_steps))
if args.reinfoselect:
p_optim = torch.optim.Adam(filter(lambda p: p.requires_grad, policy.parameters()), lr=args.lr)
optimizer_to(m_optim,device)
metric = om.metrics.Metric()
logger.info(args)
if args.reinfoselect:
train_reinfoselect(args, model, policy, loss_fn, m_optim, m_scheduler, p_optim, metric, train_loader, dev_loader, device)
else:
train(args, model, loss_fn, m_optim, m_scheduler, metric, train_loader, dev_loader, device, train_sampler=train_sampler)
if __name__ == "__main__":
main() | [
"torch.nn.functional.gumbel_softmax",
"torch.nn.MarginRankingLoss",
"torch.cat",
"torch.distributions.Categorical",
"torch.sigmoid",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.functional.log_softmax",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.nn.BCELoss",
"torch.mean",
"torch.distributed.barrier",
"torch.nn.CrossEntropyLoss",
"torch.multiprocessing.set_sharing_strategy",
"torch.masked_select"
] | 1.4.0 | vishalbelsare/OpenMatch | 84b25502bf52c58b9e71bd0754b2fc192d9b448f |
1.4 | from typing import List
import torch
import torch.nn as nn
class Embedder(nn.Module):
def __init__(
self,
vocab_size: int,
embed_dim: int,
embed_matrix: List[float] = None
) -> None:
super(Embedder, self).__init__()
self._vocab_size = vocab_size
self._embed_dim = embed_dim
self._embedder = nn.Embedding(self._vocab_size, self._embed_dim, padding_idx=0)
if embed_matrix is not None:
self._embed_matrix = torch.tensor(embed_matrix)
self._embedder.weight = nn.Parameter(self._embed_matrix, requires_grad=True)
def forward(self, idx: torch.Tensor) -> torch.Tensor:
embed = self._embedder(idx)
return embed
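# Illustrative usage sketch (not part of the original module); the vocabulary and
# batch sizes below are made-up example values.
def _embedder_demo():
    import torch
    embedder = Embedder(vocab_size=10, embed_dim=4)
    idx = torch.tensor([[1, 2, 3], [4, 5, 0]])  # (batch, seq_len), 0 is the padding index
    out = embedder(idx)                         # (batch, seq_len, embed_dim)
    assert out.shape == (2, 3, 4)
    return out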
| [
"torch.tensor",
"torch.nn.Embedding",
"torch.nn.Parameter"
] | 1.4.0 | vishalbelsare/OpenMatch | 84b25502bf52c58b9e71bd0754b2fc192d9b448f |
1.1 | #!usr/bin/python
# -*- coding: utf-8 -*-
"""
CAM visualization
"""
import argparse
from io import BytesIO
import matplotlib.pyplot as plt
import requests
from PIL import Image
import torch
from torchvision import models
from torchvision.transforms.functional import normalize, resize, to_tensor, to_pil_image
from torchcam.cams import CAM, GradCAM, GradCAMpp, SmoothGradCAMpp, ScoreCAM, SSCAM, ISSCAM
from torchcam.utils import overlay_mask
VGG_CONFIG = {_vgg: dict(input_layer='features', conv_layer='features')
for _vgg in models.vgg.__dict__.keys()}
RESNET_CONFIG = {_resnet: dict(input_layer='conv1', conv_layer='layer4', fc_layer='fc')
for _resnet in models.resnet.__dict__.keys()}
DENSENET_CONFIG = {_densenet: dict(input_layer='features', conv_layer='features', fc_layer='classifier')
for _densenet in models.densenet.__dict__.keys()}
MODEL_CONFIG = {
**VGG_CONFIG, **RESNET_CONFIG, **DENSENET_CONFIG,
'mobilenet_v2': dict(input_layer='features', conv_layer='features')
}
def main(args):
if args.device is None:
args.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device = torch.device(args.device)
# Pretrained imagenet model
model = models.__dict__[args.model](pretrained=True).eval().to(device=device)
conv_layer = MODEL_CONFIG[args.model]['conv_layer']
input_layer = MODEL_CONFIG[args.model]['input_layer']
fc_layer = MODEL_CONFIG[args.model]['fc_layer']
# Image
if args.img.startswith('http'):
img_path = BytesIO(requests.get(args.img).content)
else:
img_path = args.img
pil_img = Image.open(img_path, mode='r').convert('RGB')
# Preprocess image
img_tensor = normalize(to_tensor(resize(pil_img, (224, 224))),
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]).to(device=device)
# Hook the corresponding layer in the model
cam_extractors = [CAM(model, conv_layer, fc_layer), GradCAM(model, conv_layer),
GradCAMpp(model, conv_layer), SmoothGradCAMpp(model, conv_layer, input_layer),
ScoreCAM(model, conv_layer, input_layer), SSCAM(model, conv_layer, input_layer),
ISSCAM(model, conv_layer, input_layer)]
# Don't trigger all hooks
for extractor in cam_extractors:
extractor._hooks_enabled = False
fig, axes = plt.subplots(1, len(cam_extractors), figsize=(7, 2))
for idx, extractor in enumerate(cam_extractors):
extractor._hooks_enabled = True
model.zero_grad()
scores = model(img_tensor.unsqueeze(0))
# Select the class index
class_idx = scores.squeeze(0).argmax().item() if args.class_idx is None else args.class_idx
# Use the hooked data to compute activation map
activation_map = extractor(class_idx, scores).cpu()
# Clean data
extractor.clear_hooks()
extractor._hooks_enabled = False
# Convert it to PIL image
# The indexing below means first image in batch
heatmap = to_pil_image(activation_map, mode='F')
# Plot the result
result = overlay_mask(pil_img, heatmap)
axes[idx].imshow(result)
axes[idx].axis('off')
axes[idx].set_title(extractor.__class__.__name__, size=8)
plt.tight_layout()
if args.savefig:
plt.savefig(args.savefig, dpi=200, transparent=True, bbox_inches='tight', pad_inches=0)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Saliency Map comparison',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--model", type=str, default='resnet18', help="Name of the torchvision model to use")
parser.add_argument("--img", type=str,
default='https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg',
help="The image to extract CAM from")
parser.add_argument("--class-idx", type=int, default=232, help='Index of the class to inspect')
parser.add_argument("--device", type=str, default=None, help='Default device to perform computation on')
parser.add_argument("--savefig", type=str, default=None, help="Path to save figure")
args = parser.parse_args()
main(args)
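# Illustrative invocation (the script filename is a placeholder for wherever this file is
# saved; the remaining values are the defaults defined above):
# python <this_script>.py --model resnet18 --class-idx 232 --savefig cams.png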
| [
"torch.device",
"torch.cuda.is_available"
] | 1.1.0 | Alymostafa/torch-cam | 3f30f0db90fba1b921dbe71e979001c954d245da |
1.3 | import torch.nn as nn
from .util_wt_bab import activation_bin, Conv2d_Q
# Channel shuffle
def channel_shuffle(x, groups):
"""shuffle channels of a 4-D Tensor"""
batch_size, channels, height, width = x.size()
assert channels % groups == 0
channels_per_group = channels // groups
# split into groups
x = x.view(batch_size, groups, channels_per_group, height, width)
# transpose 1, 2 axis
x = x.transpose(1, 2).contiguous()
# reshape into original shape
x = x.view(batch_size, channels, height, width)
return x
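# Illustrative sketch (not part of the original module): a quick behaviour check for
# channel_shuffle; the tensor sizes are arbitrary example values.
def _channel_shuffle_demo():
    import torch
    x = torch.arange(2 * 4 * 3 * 3, dtype=torch.float32).view(2, 4, 3, 3)
    y = channel_shuffle(x, groups=2)
    # shape is preserved; with 2 groups, channels [0, 1, 2, 3] are reordered to [0, 2, 1, 3]
    assert y.shape == x.shape
    return y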
# ********************* Quantized (ternary/binary) module ************************
class Tnn_Bin_Conv2d(nn.Module):
# Args: groups - number of convolution groups; channel_shuffle - channel-shuffle flag; shuffle_groups - number of shuffle groups (must match the previous layer's group count); last_relu|last_bin - whether the last conv layer's input stays binary (binary: last_relu=0, last_bin=1)
def __init__(self, input_channels, output_channels,
kernel_size=-1, stride=-1, padding=-1, groups=1, channel_shuffle=0, shuffle_groups=1, A=2, W=2, last_relu=0, last_bin=0):
super(Tnn_Bin_Conv2d, self).__init__()
self.channel_shuffle_flag = channel_shuffle
self.shuffle_groups = shuffle_groups
self.last_relu = last_relu
self.last_bin = last_bin
# ********************* Quantized (ternary/binary) convolution *********************
self.tnn_bin_conv = Conv2d_Q(input_channels, output_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, A=A, W=W)
self.bn = nn.BatchNorm2d(output_channels)
self.relu = nn.ReLU(inplace=True)
self.bin_a = activation_bin(A=A)
def forward(self, x):
if self.channel_shuffle_flag:
x = channel_shuffle(x, groups=self.shuffle_groups)
x = self.tnn_bin_conv(x)
x = self.bn(x)
if self.last_relu:
x = self.relu(x)
if self.last_bin:
x = self.bin_a(x)
return x
class Net(nn.Module):
def __init__(self, cfg = None, A=2, W=2):
super(Net, self).__init__()
# Model architecture
if cfg is None:
cfg = [256, 256, 256, 512, 512, 512, 1024, 1024]
self.tnn_bin = nn.Sequential(
nn.Conv2d(3, cfg[0], kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(cfg[0]),
Tnn_Bin_Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, padding=0, groups=2, channel_shuffle=0, A=A, W=W),
Tnn_Bin_Conv2d(cfg[1], cfg[2], kernel_size=1, stride=1, padding=0, groups=2, channel_shuffle=1, shuffle_groups=2, A=A, W=W),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
Tnn_Bin_Conv2d(cfg[2], cfg[3], kernel_size=3, stride=1, padding=1, groups=16, channel_shuffle=1, shuffle_groups=2, A=A, W=W),
Tnn_Bin_Conv2d(cfg[3], cfg[4], kernel_size=1, stride=1, padding=0, groups=4, channel_shuffle=1, shuffle_groups=16, A=A, W=W),
Tnn_Bin_Conv2d(cfg[4], cfg[5], kernel_size=1, stride=1, padding=0, groups=4, channel_shuffle=1, shuffle_groups=4, A=A, W=W),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
Tnn_Bin_Conv2d(cfg[5], cfg[6], kernel_size=3, stride=1, padding=1, groups=32, channel_shuffle=1, shuffle_groups=4, A=A, W=W),
Tnn_Bin_Conv2d(cfg[6], cfg[7], kernel_size=1, stride=1, padding=0, groups=8, channel_shuffle=1, shuffle_groups=32, A=A, W=W, last_relu=0, last_bin=1), # binary quantization: last_relu=0, last_bin=1; full precision: last_relu=1, last_bin=0
nn.Conv2d(cfg[7], 10, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(10),
nn.ReLU(inplace=True),
nn.AvgPool2d(kernel_size=8, stride=1, padding=0),
)
# Forward pass
def forward(self, x):
x = self.tnn_bin(x)
x = x.view(x.size(0), -1)
return x
| [
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.3 | xu-peng-tao/SSD-Pruning-and-quantization | 64b84dfa88a1686593addaa9941cc14579e129ee |
0.4 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import pytest
import torch
from pyro.contrib.util import (
get_indices, tensor_to_dict, rmv, rvv, lexpand, rexpand, rdiag, rtril
)
from tests.common import assert_equal
def test_get_indices_sizes():
sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
assert_equal(get_indices(["b"], sizes=sizes), torch.tensor([2, 3]))
assert_equal(get_indices(["b", "c"], sizes=sizes), torch.tensor([2, 3, 4, 5]))
tensors = OrderedDict([("a", torch.ones(2)), ("b", torch.ones(2)), ("c", torch.ones(2))])
assert_equal(get_indices(["b"], tensors=tensors), torch.tensor([2, 3]))
assert_equal(get_indices(["b", "c"], tensors=tensors), torch.tensor([2, 3, 4, 5]))
def test_tensor_to_dict():
sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
vector = torch.tensor([1., 2, 3, 4, 5, 6])
assert_equal(tensor_to_dict(sizes, vector), {"a": torch.tensor([1., 2.]),
"b": torch.tensor([3., 4.]),
"c": torch.tensor([5., 6.])})
assert_equal(tensor_to_dict(sizes, vector, subset=["b"]),
{"b": torch.tensor([3., 4.])})
@pytest.mark.parametrize("A,b", [
(torch.tensor([[1., 2.], [2., -3.]]), torch.tensor([-1., 2.]))
])
def test_rmv(A, b):
assert_equal(rmv(A, b), A.mv(b), prec=1e-8)
batched_A = lexpand(A, 5, 4)
batched_b = lexpand(b, 5, 4)
expected_Ab = lexpand(A.mv(b), 5, 4)
assert_equal(rmv(batched_A, batched_b), expected_Ab, prec=1e-8)
@pytest.mark.parametrize("a,b", [
(torch.tensor([1., 2.]), torch.tensor([-1., 2.]))
])
def test_rvv(a, b):
assert_equal(rvv(a, b), torch.dot(a, b), prec=1e-8)
batched_a = lexpand(a, 5, 4)
batched_b = lexpand(b, 5, 4)
expected_ab = lexpand(torch.dot(a, b), 5, 4)
assert_equal(rvv(batched_a, batched_b), expected_ab, prec=1e-8)
def test_lexpand():
A = torch.tensor([[1., 2.], [-2., 0]])
assert_equal(lexpand(A), A, prec=1e-8)
assert_equal(lexpand(A, 4), A.expand(4, 2, 2), prec=1e-8)
assert_equal(lexpand(A, 4, 2), A.expand(4, 2, 2, 2), prec=1e-8)
def test_rexpand():
A = torch.tensor([[1., 2.], [-2., 0]])
assert_equal(rexpand(A), A, prec=1e-8)
assert_equal(rexpand(A, 4), A.unsqueeze(-1).expand(2, 2, 4), prec=1e-8)
assert_equal(rexpand(A, 4, 2), A.unsqueeze(-1).unsqueeze(-1).expand(2, 2, 4, 2), prec=1e-8)
def test_rtril():
A = torch.tensor([[1., 2.], [-2., 0]])
assert_equal(rtril(A), torch.tril(A), prec=1e-8)
expanded = lexpand(A, 5, 4)
expected = lexpand(torch.tril(A), 5, 4)
assert_equal(rtril(expanded), expected, prec=1e-8)
def test_rdiag():
v = torch.tensor([1., 2., -1.])
assert_equal(rdiag(v), torch.diag(v), prec=1e-8)
expanded = lexpand(v, 5, 4)
expected = lexpand(torch.diag(v), 5, 4)
assert_equal(rdiag(expanded), expected, prec=1e-8)
| [
"torch.tril",
"torch.ones",
"torch.tensor",
"torch.diag",
"torch.dot"
] | 0.4.0 | fluffybird2323/pyro | 9e74e499dbda76c28f12528235dac25bd17f0b1b |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[:3]) < 0.7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
import wandb
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
# @property
# def measurable(self):
# return self.count > 0
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
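# Illustrative sketch (not part of the original file): track a running loss with
# SmoothedValue; the values are arbitrary.
def _smoothed_value_demo():
    meter = SmoothedValue(window_size=3, fmt="{avg:.2f}")
    for v in (1.0, 2.0, 3.0, 4.0):
        meter.update(v)
    # the deque keeps only the last 3 values, while the global average uses all 4
    assert meter.avg == 3.0 and meter.global_avg == 2.5
    return str(meter)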
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, prefix, epoch, num_batches, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
self.prefix = prefix
self.epoch = epoch
self.num_batches = num_batches
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
# Log every step to wandb
stats = {k: meter.global_avg for k, meter in self.meters.items()}
log_stats = {**{f'{self.prefix}_{k}': v for k, v in stats.items()},
'epoch': self.epoch,
'batch_step': i,
'step': self.epoch * self.num_batches + i
}
wandb.log(log_stats)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommitted changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask)
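# Illustrative sketch (not part of the original file): batch two images of different sizes
# into one padded NestedTensor; the shapes are arbitrary example values.
def _nested_tensor_demo():
    imgs = [torch.rand(3, 20, 30), torch.rand(3, 25, 15)]
    nt = nested_tensor_from_tensor_list(imgs)
    tensors, mask = nt.decompose()
    # every image is zero-padded up to the per-batch maximum height and width
    assert tensors.shape == (2, 3, 25, 30) and mask.shape == (2, 25, 30)
    return nt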
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
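# Illustrative sketch (not part of the original file): top-1 accuracy on a tiny fake batch;
# the logits and targets are made up for the example.
def _accuracy_demo():
    logits = torch.tensor([[0.1, 0.9, 0.0], [0.8, 0.1, 0.1]])
    target = torch.tensor([1, 2])
    top1, = accuracy(logits, target, topk=(1,))
    # the first sample is classified correctly, the second is not -> 50%
    assert abs(top1.item() - 50.0) < 1e-6
    return top1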
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.stack",
"torch.ones",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.distributed.init_process_group",
"torch.ByteTensor",
"torch.distributed.is_initialized",
"torch.tensor",
"torch.zeros_like",
"torch.distributed.get_rank",
"torch.empty",
"torch.zeros",
"torch.save",
"torch.cuda.max_memory_allocated",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.distributed.is_available",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.distributed.all_gather",
"torch.ByteStorage.from_buffer",
"torch.distributed.all_reduce"
] | 1.5.0 | rehno-lindeque/detr | 65c4f49b2795f68fba57b0f139d02e2dbe8b83ac |
1.2 | import torch
import torch.nn as nn
import scipy.optimize as so
import numpy as np
import torch.nn.functional as F
import logging
from deeprobust.image.attack.base_attack import BaseAttack
class LBFGS(BaseAttack):
def __init__(self, model, label, device = 'cuda' ):
super(LBFGS, self).__init__(model, device)
def generate(self, image, label, target_label, **kwargs):
assert self.check_type_device(image, label)
assert self.parse_params(**kwargs)
self.target_label = target_label
adv_img, self.dist, self.loss = optimize(self.model,
self.image,
self.label,
self.target_label,
self.bounds,
self.epsilon,
self.maxiter,
self.class_num,
self.device)
return adv_img
def distance(self):
return self.dist
def loss(self):
return self.loss
def parse_params(self,
clip_max = 1,
clip_min = 0,
class_num = 10,
epsilon = 1e-5, #step of finding initial c
maxiter = 20, #maximum of iteration in lbfgs optimization
):
self.epsilon = epsilon
self.maxiter = maxiter
self.class_num = class_num
self.bounds = (clip_min, clip_max)
return True
def optimize(model, image, label, target_label, bounds, epsilon, maxiter, class_num, device):
x_t = image
x0 = image[0].to('cpu').detach().numpy()
min_, max_ = bounds
target_dist = torch.tensor(target_label)
target_dist = target_dist.unsqueeze_(0).long().to(device)
# store the shape for later and operate on the flattened input
shape = x0.shape
dtype = x0.dtype
x0 = x0.flatten().astype(np.float64)
n = len(x0)
bounds = [(min_, max_)] * n
def distance(x,y):
# calculate the distance
x = torch.from_numpy(x).double()
y = torch.from_numpy(y).double()
dist_squ = torch.norm(x - y)
return dist_squ **2
def loss(x, c):
#calculate the target function
v1 = distance(x0,x)
x = torch.tensor(x.astype(dtype).reshape(shape))
x = x.unsqueeze_(0).float().to(device)
predict = model(x)
v2 = F.nll_loss(predict, target_dist)
v = c * v1 + v2
#print(v)
return np.float64(v)
def pending_attack(target_model, adv_exp, target_label):
# check whether the attack succeeded
adv_exp = adv_exp.reshape(shape).astype(dtype)
adv_exp = torch.from_numpy(adv_exp)
adv_exp = adv_exp.unsqueeze_(0).float().to(device)
predict1 = target_model(adv_exp)
label = predict1.argmax(dim=1, keepdim=True)
if label == target_label:
return True
else:
return False
def lbfgs_b(c):
# initialize the variables
approx_grad_eps = (max_ - min_) / 100
print('in lbfgs_b:', 'c =', c)
#start optimization
optimize_output, f, d = so.fmin_l_bfgs_b(
loss,
x0,
args=(c,),
approx_grad = True,
bounds = bounds,
m = 15,
maxiter = maxiter,
factr = 1e10, #optimization accuracy
maxls = 5,
epsilon = approx_grad_eps)
print('finish optimization')
# LBFGS-B does not always exactly respect the boundaries
if np.amax(optimize_output) > max_ or np.amin(optimize_output) < min_: # pragma: no coverage
logging.info('Input out of bounds (min, max = {}, {}). Performing manual clip.'.format(
np.amin(optimize_output), np.amax(optimize_output)))
optimize_output = np.clip(optimize_output, min_, max_)
#optimize_output = optimize_output.reshape(shape).astype(dtype)
#test_input = torch.from_numpy(optimize_output)
#print(test_input)
#test_input = test_input.unsqueeze_(0).float()
is_adversarial = pending_attack(target_model = model, adv_exp = optimize_output, target_label = target_label)
return optimize_output, is_adversarial
#x_new, isadv = lbfgs_b(0)
# finding initial c
c = epsilon
print('finding initial c:')
for i in range(30):
c = 2 * c
x_new, is_adversarial = lbfgs_b(c)
if is_adversarial == False:
break
print('start binary search:')
if is_adversarial == True: # pragma: no cover
print('Could not find an adversarial; maybe the model returns wrong gradients')
return
print('c_high:',c)
# binary search
c_low = 0
c_high = c
while c_high - c_low >= epsilon:
print(c_high,' ',c_low)
c_half = (c_low + c_high) / 2
x_new, is_adversarial = lbfgs_b(c_half)
if is_adversarial:
c_low = c_half
else:
c_high = c_half
x_new, is_adversarial = lbfgs_b(c_low)
dis = distance(x_new, x0)
mintargetfunc = loss(x_new, c_low)
x_new = x_new.astype(dtype)
x_new = x_new.reshape(shape)
x_new = torch.from_numpy(x_new).unsqueeze_(0).float().to(device)
return x_new, dis, mintargetfunc
| [
"torch.norm",
"torch.from_numpy",
"torch.tensor",
"torch.nn.functional.nll_loss"
] | 1.2.0 | HenryKenlay/DeepRobust | 3f56dcc45f1fed788423d32cc179c26513416e2e |
1.1 | import torch
import torch.nn as nn
from torchvision.models import resnet152
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, input):
return input.view(input.size()[0], -1)
class AuxConv(nn.Module):
def __init__(self, in_channels, c_tag, stride=1, p=0):
super(AuxConv, self).__init__()
self.aux = nn.Sequential(nn.Conv2d(in_channels, c_tag, kernel_size=(3, 1)),
nn.ReLU(),
nn.Dropout(p),
nn.Conv2d(c_tag, c_tag, kernel_size=(1, 3)),
nn.ReLU(),
nn.Dropout(p),
Flatten())
def forward(self, input):
return self.aux(input)
class DEN(nn.Module):
def __init__(self, backbone_wts=None, backbone_freeze=True, p=0):
super(DEN, self).__init__()
resnet = resnet152(pretrained=False)
if backbone_wts is not None:
resnet = self._init_resnet(resnet, backbone_wts)
if backbone_freeze:
for param in resnet.parameters():
param.requires_grad = False
# prepare the network
self._flat_resnet152(resnet)
aux_1024 = [AuxConv(in_channels=1024, c_tag=8, p=p) for _ in range(16)]
aux_2048 = [AuxConv(in_channels=2048, c_tag=64, p=p) for _ in range(3)]
self.aux_modules = nn.ModuleList(aux_1024 + aux_2048)
self._init_added_weights()
def _init_resnet(self, resnet, backbone_wts):
num_ftrs = resnet.fc.in_features
print("fc", num_ftrs, "x", 128*416)
resnet.fc = nn.Linear(num_ftrs, 128 * 416)
resnet.load_state_dict(torch.load(backbone_wts))
return resnet
def _init_added_weights(self):
nn.init.xavier_uniform_(self.fc.weight)
for name,param in self.aux_modules.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(param)
def _flat_resnet152(self, model):
# break the resent to its building blocks
# into a list
flattened = []
flattened += list(model.children())[:4]
for i in range(4,8):
sequence = list(model.children())[i]
flattened += list(sequence.children())
flattened += list(model.children())[-2:]
self.resnet_top = nn.Sequential(*flattened[:35])
# self.resnet_mid = nn.ModuleList(flattened[35:54])
self.resnet_mid = nn.ModuleList(flattened[35:51])
self.avg_pool2d = flattened[54]
self.fc = nn.Linear(25280, 128 * 416)
# self.fc = nn.Linear(59392, 128*416)
def forward(self, input):
# print("right after in den", input.shape)
x = self.resnet_top(input)
# print("after resnet_top", x.shape)
outputs = []
for i, block in enumerate(self.resnet_mid):
x = block(x)
# print("resnet_mid loop", x.shape)
outputs.append(self.aux_modules[i](x))
x = self.avg_pool2d(x)
print("after pooling", x.shape)
x = x.view(x.shape[0], -1)
outputs.append(x)
outputs_concat = torch.cat(outputs, dim=1)
print("output concat", outputs_concat.shape)
out = self.fc(outputs_concat)
print("output shape", out.shape)
return out
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load"
] | 1.1.0 | jinmyeonglee/LKVOLearner | 8d6a167d50942131dc9e379c280f442c37579d37 |
0.4 | from models import GeneratorRRDB
from datasets import denormalize, mean, std
import torch
from torch.autograd import Variable
import argparse
import os
from torchvision.utils import save_image
from torchvision import transforms
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, required=True, help="Path to image")
parser.add_argument("--checkpoint_model", type=str, required=True, help="Path to checkpoint model")
parser.add_argument("--channels", type=int, default=3, help="Number of image channels")
parser.add_argument("--residual_blocks", type=int, default=23, help="Number of residual blocks in G")
opt = parser.parse_args()
print(opt)
os.makedirs("images/outputs", exist_ok=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define model and load model checkpoint
generator = GeneratorRRDB(opt.channels, filters=64, num_res_blocks=opt.residual_blocks).to(device)
generator.load_state_dict(torch.load(opt.checkpoint_model))
generator.eval()
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
# Prepare input
image_tensor = Variable(transform(Image.open(opt.image_path))).to(device).unsqueeze(0)
# Upsample image
with torch.no_grad():
sr_image = denormalize(generator(image_tensor)).cpu()
# Save image
fn = opt.image_path.split("/")[-1]
save_image(sr_image, f"images/outputs/sr-{fn}")
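# Illustrative invocation (the script filename and both paths are placeholders, not files
# shipped with the repository):
# python <this_script>.py --image_path images/example.png --checkpoint_model saved_models/generator.pth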
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 0.4.0 | jiseokcube/PyTorch-GAN | 285c260934d37261d4c55fffbbeea32ce308cc53 |
1.4 | from typing import Tuple
import math
import torch
import torchaudio
from torch import Tensor
__all__ = [
'get_mel_banks',
'inverse_mel_scale',
'inverse_mel_scale_scalar',
'mel_scale',
'mel_scale_scalar',
'spectrogram',
'fbank',
'mfcc',
'vtln_warp_freq',
'vtln_warp_mel_freq',
'resample_waveform',
]
# numeric_limits<float>::epsilon() 1.1920928955078125e-07
EPSILON = torch.tensor(torch.finfo(torch.float).eps)
# 1 milliseconds = 0.001 seconds
MILLISECONDS_TO_SECONDS = 0.001
# window types
HAMMING = 'hamming'
HANNING = 'hanning'
POVEY = 'povey'
RECTANGULAR = 'rectangular'
BLACKMAN = 'blackman'
WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]
def _get_epsilon(device, dtype):
return EPSILON.to(device=device, dtype=dtype)
def _next_power_of_2(x: int) -> int:
r"""Returns the smallest power of 2 that is greater than or equal to x
"""
return 1 if x == 0 else 2 ** (x - 1).bit_length()
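# Illustrative sketch (not part of the original file): _next_power_of_2 rounds a window
# length up to a power of two and leaves exact powers of two unchanged.
def _next_power_of_2_demo():
    assert _next_power_of_2(400) == 512
    assert _next_power_of_2(512) == 512
    assert _next_power_of_2(0) == 1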
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
representing how the window is shifted along the waveform. Each row is a frame.
Args:
waveform (Tensor): Tensor of size ``num_samples``
window_size (int): Frame length
window_shift (int): Frame shift
snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends.
Returns:
Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
"""
assert waveform.dim() == 1
num_samples = waveform.size(0)
strides = (window_shift * waveform.stride(0), waveform.stride(0))
if snip_edges:
if num_samples < window_size:
return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
else:
m = 1 + (num_samples - window_size) // window_shift
else:
reversed_waveform = torch.flip(waveform, [0])
m = (num_samples + (window_shift // 2)) // window_shift
pad = window_size // 2 - window_shift // 2
pad_right = reversed_waveform
if pad > 0:
# torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
# but we want [2, 1, 0, 0, 1, 2]
pad_left = reversed_waveform[-pad:]
waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
else:
# pad is negative so we want to trim the waveform at the front
waveform = torch.cat((waveform[-pad:], pad_right), dim=0)
sizes = (m, window_size)
return waveform.as_strided(sizes, strides)
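# Illustrative sketch (not part of the original file): frame a 10-sample waveform with a
# 4-sample window shifted by 2 samples, snipping the edges; the numbers are example values.
def _get_strided_demo():
    waveform = torch.arange(10, dtype=torch.float32)
    frames = _get_strided(waveform, window_size=4, window_shift=2, snip_edges=True)
    # 1 + (10 - 4) // 2 = 4 frames, each of length 4
    assert frames.shape == (4, 4)
    return frames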
def _feature_window_function(window_type: str,
window_size: int,
blackman_coeff: float,
device: torch.device,
dtype: int,
) -> Tensor:
r"""Returns a window function with the given type and size
"""
if window_type == HANNING:
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
elif window_type == HAMMING:
return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
elif window_type == POVEY:
# like hanning but goes to zero at edges
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
elif window_type == RECTANGULAR:
return torch.ones(window_size, device=device, dtype=dtype)
elif window_type == BLACKMAN:
a = 2 * math.pi / (window_size - 1)
window_function = torch.arange(window_size, device=device, dtype=dtype)
# can't use torch.blackman_window as they use different coefficients
return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
(0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
else:
raise Exception('Invalid window type ' + window_type)
def _get_log_energy(strided_input: Tensor,
epsilon: Tensor,
energy_floor: float) -> Tensor:
r"""Returns the log energy of size (m) for a strided_input (m,*)
"""
device, dtype = strided_input.device, strided_input.dtype
log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
if energy_floor == 0.0:
return log_energy
return torch.max(
log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
def _get_waveform_and_window_properties(waveform: Tensor,
channel: int,
sample_frequency: float,
frame_shift: float,
frame_length: float,
round_to_power_of_two: bool,
preemphasis_coefficient: float) -> Tuple[Tensor, int, int, int]:
r"""Gets the waveform and window properties
"""
channel = max(channel, 0)
assert channel < waveform.size(0), ('Invalid channel {} for size {}'.format(channel, waveform.size(0)))
waveform = waveform[channel, :] # size (n)
window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
assert 2 <= window_size <= len(
waveform), ('choose a window size {} that is [2, {}]'
.format(window_size, len(waveform)))
assert 0 < window_shift, '`window_shift` must be greater than 0'
assert padded_window_size % 2 == 0, 'the padded `window_size` must be divisible by two.' \
' use `round_to_power_of_two` or change `frame_length`'
assert 0. <= preemphasis_coefficient <= 1.0, '`preemphasis_coefficient` must be between [0,1]'
assert sample_frequency > 0, '`sample_frequency` must be greater than zero'
return waveform, window_shift, window_size, padded_window_size
def _get_window(waveform: Tensor,
padded_window_size: int,
window_size: int,
window_shift: int,
window_type: str,
blackman_coeff: float,
snip_edges: bool,
raw_energy: bool,
energy_floor: float,
dither: float,
remove_dc_offset: bool,
preemphasis_coefficient: float) -> Tuple[Tensor, Tensor]:
r"""Gets a window and its log energy
Returns:
(Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
"""
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
# size (m, window_size)
strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)
if dither != 0.0:
# Returns a random number strictly between 0 and 1
x = torch.max(epsilon, torch.rand(strided_input.shape, device=device, dtype=dtype))
rand_gauss = torch.sqrt(-2 * x.log()) * torch.cos(2 * math.pi * x)
strided_input = strided_input + rand_gauss * dither
if remove_dc_offset:
# Subtract each row/frame by its mean
row_means = torch.mean(strided_input, dim=1).unsqueeze(1) # size (m, 1)
strided_input = strided_input - row_means
if raw_energy:
# Compute the log energy of each row/frame before applying preemphasis and
# window function
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
if preemphasis_coefficient != 0.0:
# strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
offset_strided_input = torch.nn.functional.pad(
strided_input.unsqueeze(0), (1, 0), mode='replicate').squeeze(0) # size (m, window_size + 1)
strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]
# Apply window_function to each row/frame
window_function = _feature_window_function(
window_type, window_size, blackman_coeff, device, dtype).unsqueeze(0) # size (1, window_size)
strided_input = strided_input * window_function # size (m, window_size)
# Pad columns with zero until we reach size (m, padded_window_size)
if padded_window_size != window_size:
padding_right = padded_window_size - window_size
strided_input = torch.nn.functional.pad(
strided_input.unsqueeze(0), (0, padding_right), mode='constant', value=0).squeeze(0)
# Compute energy after window function (not the raw one)
if not raw_energy:
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
return strided_input, signal_log_energy
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
# subtracts the column mean of the tensor size (m, n) if subtract_mean=True
# it returns size (m, n)
if subtract_mean:
col_means = torch.mean(tensor, dim=0).unsqueeze(0)
tensor = tensor - col_means
return tensor
def spectrogram(waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_duration: float = 0.0,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
window_type: str = POVEY) -> Tensor:
r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's
compute-spectrogram-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
(Default: ``'povey'``)
Returns:
Tensor: A spectrogram identical to what Kaldi would output. The shape is
(m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided
"""
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0)
strided_input, signal_log_energy = _get_window(
waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff,
snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient)
# size (m, padded_window_size // 2 + 1, 2)
fft = torch.rfft(strided_input, 1, normalized=False, onesided=True)
# Convert the FFT into a power spectrum
power_spectrum = torch.max(fft.pow(2).sum(2), epsilon).log() # size (m, padded_window_size // 2 + 1)
power_spectrum[:, 0] = signal_log_energy
power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean)
return power_spectrum
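# Illustrative usage sketch (added commentary, not part of the original torchaudio source).
# The shapes assume a hypothetical 1-second mono signal at 16 kHz with the default
# 25 ms window / 10 ms shift, which gives roughly 98 frames and 512 // 2 + 1 = 257 bins:
#
#     >>> waveform = torch.rand(1, 16000)
#     >>> spec = spectrogram(waveform, sample_frequency=16000.0)
#     >>> spec.shape   # roughly torch.Size([98, 257])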
def inverse_mel_scale_scalar(mel_freq: float) -> float:
return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0)
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
return 700.0 * ((mel_freq / 1127.0).exp() - 1.0)
def mel_scale_scalar(freq: float) -> float:
return 1127.0 * math.log(1.0 + freq / 700.0)
def mel_scale(freq: Tensor) -> Tensor:
return 1127.0 * (1.0 + freq / 700.0).log()
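# Worked example (added note): by this definition mel_scale_scalar(1000.0)
# = 1127 * ln(1 + 1000 / 700) ~= 1000.0, and inverse_mel_scale_scalar is its exact inverse,
# so inverse_mel_scale_scalar(mel_scale_scalar(f)) == f for any frequency f in Hz.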
def vtln_warp_freq(vtln_low_cutoff: float,
vtln_high_cutoff: float,
low_freq: float,
high_freq: float,
vtln_warp_factor: float,
freq: Tensor) -> Tensor:
r"""This computes a VTLN warping function that is not the same as HTK's one,
but has similar inputs (this function has the advantage of never producing
empty bins).
This function computes a warp function F(freq), defined between low_freq
and high_freq inclusive, with the following properties:
F(low_freq) == low_freq
F(high_freq) == high_freq
The function is continuous and piecewise linear with two inflection
points.
The lower inflection point (measured in terms of the unwarped
frequency) is at frequency l, determined as described below.
The higher inflection point is at a frequency h, determined as
described below.
If l <= f <= h, then F(f) = f/vtln_warp_factor.
If the higher inflection point (measured in terms of the unwarped
frequency) is at h, then max(h, F(h)) == vtln_high_cutoff.
Since (by the last point) F(h) == h/vtln_warp_factor, then
max(h, h/vtln_warp_factor) == vtln_high_cutoff, so
h = vtln_high_cutoff / max(1, 1/vtln_warp_factor).
= vtln_high_cutoff * min(1, vtln_warp_factor).
If the lower inflection point (measured in terms of the unwarped
frequency) is at l, then min(l, F(l)) == vtln_low_cutoff
This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor)
= vtln_low_cutoff * max(1, vtln_warp_factor)
Args:
vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
low_freq (float): Lower frequency cutoffs in mel computation
high_freq (float): Upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
freq (Tensor): given frequency in Hz
Returns:
Tensor: Freq after vtln warp
"""
assert vtln_low_cutoff > low_freq, 'be sure to set the vtln_low option higher than low_freq'
assert vtln_high_cutoff < high_freq, 'be sure to set the vtln_high option lower than high_freq [or negative]'
l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
h = vtln_high_cutoff * min(1.0, vtln_warp_factor)
scale = 1.0 / vtln_warp_factor
Fl = scale * l # F(l)
Fh = scale * h # F(h)
assert l > low_freq and h < high_freq
# slope of left part of the 3-piece linear function
scale_left = (Fl - low_freq) / (l - low_freq)
# [slope of center part is just "scale"]
# slope of right part of the 3-piece linear function
scale_right = (high_freq - Fh) / (high_freq - h)
res = torch.empty_like(freq)
outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq) # freq < low_freq || freq > high_freq
before_l = torch.lt(freq, l) # freq < l
before_h = torch.lt(freq, h) # freq < h
after_h = torch.ge(freq, h) # freq >= h
    # order of operations matters here (since the frequency regions overlap)
res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq)
res[before_h] = scale * freq[before_h]
res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq)
res[outside_low_high_freq] = freq[outside_low_high_freq]
return res
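# Worked example for the piecewise warp above (added note, values are hypothetical):
# with low_freq=20, high_freq=8000, vtln_low_cutoff=100, vtln_high_cutoff=7500 and
# vtln_warp_factor=0.9, we get l = 100 * max(1, 0.9) = 100, h = 7500 * min(1, 0.9) = 6750,
# scale = 1 / 0.9 ~= 1.111, Fl ~= 111.1, Fh = 7500, so
# scale_left = (111.1 - 20) / (100 - 20) ~= 1.139 and scale_right = (8000 - 7500) / (8000 - 6750) = 0.4.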
def vtln_warp_mel_freq(vtln_low_cutoff: float,
vtln_high_cutoff: float,
low_freq, high_freq: float,
vtln_warp_factor: float,
mel_freq: Tensor) -> Tensor:
r"""
Args:
vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
low_freq (float): Lower frequency cutoffs in mel computation
high_freq (float): Upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
mel_freq (Tensor): Given frequency in Mel
Returns:
Tensor: ``mel_freq`` after vtln warp
"""
return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, inverse_mel_scale(mel_freq)))
def get_mel_banks(num_bins: int,
window_length_padded: int,
sample_freq: float,
low_freq: float,
high_freq: float,
vtln_low: float,
vtln_high: float,
vtln_warp_factor: float) -> Tuple[Tensor, Tensor]:
"""
Returns:
(Tensor, Tensor): The tuple consists of ``bins`` (which is
melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is
center frequencies of bins of size (``num_bins``)).
"""
assert num_bins > 3, 'Must have at least 3 mel bins'
assert window_length_padded % 2 == 0
num_fft_bins = window_length_padded / 2
nyquist = 0.5 * sample_freq
if high_freq <= 0.0:
high_freq += nyquist
assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), \
('Bad values in options: low-freq {} and high-freq {} vs. nyquist {}'.format(low_freq, high_freq, nyquist))
# fft-bin width [think of it as Nyquist-freq / half-window-length]
fft_bin_width = sample_freq / window_length_padded
mel_low_freq = mel_scale_scalar(low_freq)
mel_high_freq = mel_scale_scalar(high_freq)
# divide by num_bins+1 in next line because of end-effects where the bins
# spread out to the sides.
mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)
if vtln_high < 0.0:
vtln_high += nyquist
assert vtln_warp_factor == 1.0 or ((low_freq < vtln_low < high_freq) and
(0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)), \
('Bad values in options: vtln-low {} and vtln-high {}, versus '
'low-freq {} and high-freq {}'.format(vtln_low, vtln_high, low_freq, high_freq))
bin = torch.arange(num_bins).unsqueeze(1)
left_mel = mel_low_freq + bin * mel_freq_delta # size(num_bins, 1)
center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # size(num_bins, 1)
right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # size(num_bins, 1)
if vtln_warp_factor != 1.0:
left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)
center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)
right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)
center_freqs = inverse_mel_scale(center_mel) # size (num_bins)
# size(1, num_fft_bins)
mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0)
# size (num_bins, num_fft_bins)
up_slope = (mel - left_mel) / (center_mel - left_mel)
down_slope = (right_mel - mel) / (right_mel - center_mel)
if vtln_warp_factor == 1.0:
# left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values
bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope))
else:
# warping can move the order of left_mel, center_mel, right_mel anywhere
bins = torch.zeros_like(up_slope)
up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel) # left_mel < mel <= center_mel
down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel) # center_mel < mel < right_mel
bins[up_idx] = up_slope[up_idx]
bins[down_idx] = down_slope[down_idx]
return bins, center_freqs
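# Illustrative call (added commentary, not part of the original source). With the usual
# fbank defaults and a 512-sample padded window at 16 kHz, high_freq=0 resolves to the
# Nyquist (8000 Hz) and vtln_high=-500 to 7500 Hz inside the function:
#
#     >>> bins, center_freqs = get_mel_banks(23, 512, 16000.0, 20.0, 0.0, 100.0, -500.0, 1.0)
#     >>> bins.shape          # torch.Size([23, 256])
#     >>> center_freqs.shape  # torch.Size([23])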
def fbank(waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
use_log_fbank: bool = True,
use_power: bool = True,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY) -> Tensor:
r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's
compute-fbank-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
(Default: ``0.0``)
htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features
(need to change other parameters). (Default: ``False``)
low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        use_log_fbank (bool, optional): If true, produce log-filterbank, else produce linear. (Default: ``True``)
use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq) (Default: ``-500.0``)
vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
(Default: ``'povey'``)
Returns:
Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
where m is calculated in _get_strided
"""
device, dtype = waveform.device, waveform.dtype
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0, device=device, dtype=dtype)
# strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
strided_input, signal_log_energy = _get_window(
waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff,
snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient)
# size (m, padded_window_size // 2 + 1, 2)
fft = torch.rfft(strided_input, 1, normalized=False, onesided=True)
power_spectrum = fft.pow(2).sum(2).unsqueeze(1) # size (m, 1, padded_window_size // 2 + 1)
if not use_power:
power_spectrum = power_spectrum.pow(0.5)
# size (num_mel_bins, padded_window_size // 2)
mel_energies, _ = get_mel_banks(num_mel_bins, padded_window_size, sample_frequency,
low_freq, high_freq, vtln_low, vtln_high, vtln_warp)
mel_energies = mel_energies.to(device=device, dtype=dtype)
# pad right column with zeros and add dimension, size (1, num_mel_bins, padded_window_size // 2 + 1)
mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode='constant', value=0).unsqueeze(0)
    # sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
mel_energies = (power_spectrum * mel_energies).sum(dim=2)
if use_log_fbank:
# avoid log of zero (which should be prevented anyway by dithering)
mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()
# if use_energy then add it as the last column for htk_compat == true else first column
if use_energy:
signal_log_energy = signal_log_energy.unsqueeze(1) # size (m, 1)
# returns size (m, num_mel_bins + 1)
if htk_compat:
mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
else:
mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)
mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
return mel_energies
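# Illustrative usage sketch (added commentary, not part of the original source). For a
# hypothetical mono 16 kHz waveform of shape (1, n), the defaults produce one
# 23-dimensional filterbank vector per 10 ms frame:
#
#     >>> waveform = torch.rand(1, 16000)
#     >>> feats = fbank(waveform, num_mel_bins=23, sample_frequency=16000.0)
#     >>> feats.shape   # (m, 23); (m, 24) if use_energy=True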
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor:
# returns a dct matrix of size (num_mel_bins, num_ceps)
# size (num_mel_bins, num_mel_bins)
dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, 'ortho')
# kaldi expects the first cepstral to be weighted sum of factor sqrt(1/num_mel_bins)
# this would be the first column in the dct_matrix for torchaudio as it expects a
# right multiply (which would be the first column of the kaldi's dct_matrix as kaldi
# expects a left multiply e.g. dct_matrix * vector).
dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins))
dct_matrix = dct_matrix[:, :num_ceps]
return dct_matrix
def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor:
# returns size (num_ceps)
# Compute liftering coefficients (scaling on cepstral coeffs)
# coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected.
i = torch.arange(num_ceps)
return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter)
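# Worked example (added note): with the usual cepstral_lifter=22.0 the coefficient for
# index i is 1 + 11 * sin(pi * i / 22), i.e. 1.0 at i=0 and a maximum of 12.0 at i=11.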
def mfcc(
waveform: Tensor,
blackman_coeff: float = 0.42,
cepstral_lifter: float = 22.0,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
num_ceps: int = 13,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY) -> Tensor:
r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's
compute-mfcc-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
(Default: ``0.0``)
htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible
features (need to change other parameters). (Default: ``False``)
low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq) (Default: ``-500.0``)
vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
(Default: ``"povey"``)
Returns:
Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
where m is calculated in _get_strided
"""
assert num_ceps <= num_mel_bins, 'num_ceps cannot be larger than num_mel_bins: %d vs %d' % (num_ceps, num_mel_bins)
device, dtype = waveform.device, waveform.dtype
# The mel_energies should not be squared (use_power=True), not have mean subtracted
# (subtract_mean=False), and use log (use_log_fbank=True).
# size (m, num_mel_bins + use_energy)
feature = fbank(waveform=waveform, blackman_coeff=blackman_coeff, channel=channel,
dither=dither, energy_floor=energy_floor, frame_length=frame_length,
frame_shift=frame_shift, high_freq=high_freq, htk_compat=htk_compat,
low_freq=low_freq, min_duration=min_duration, num_mel_bins=num_mel_bins,
preemphasis_coefficient=preemphasis_coefficient, raw_energy=raw_energy,
remove_dc_offset=remove_dc_offset, round_to_power_of_two=round_to_power_of_two,
sample_frequency=sample_frequency, snip_edges=snip_edges, subtract_mean=False,
use_energy=use_energy, use_log_fbank=True, use_power=True,
vtln_high=vtln_high, vtln_low=vtln_low, vtln_warp=vtln_warp, window_type=window_type)
if use_energy:
# size (m)
signal_log_energy = feature[:, num_mel_bins if htk_compat else 0]
# offset is 0 if htk_compat==True else 1
mel_offset = int(not htk_compat)
feature = feature[:, mel_offset:(num_mel_bins + mel_offset)]
# size (num_mel_bins, num_ceps)
dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device)
# size (m, num_ceps)
feature = feature.matmul(dct_matrix)
if cepstral_lifter != 0.0:
# size (1, num_ceps)
lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
feature *= lifter_coeffs.to(device=device, dtype=dtype)
# if use_energy then replace the last column for htk_compat == true else first column
if use_energy:
feature[:, 0] = signal_log_energy
if htk_compat:
energy = feature[:, 0].unsqueeze(1) # size (m, 1)
feature = feature[:, 1:] # size (m, num_ceps - 1)
if not use_energy:
# scale on C0 (actually removing a scale we previously added that's
# part of one common definition of the cosine transform.)
energy *= math.sqrt(2)
feature = torch.cat((feature, energy), dim=1)
feature = _subtract_column_mean(feature, subtract_mean)
return feature
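# Illustrative usage sketch (added commentary, not part of the original source), assuming a
# hypothetical mono 16 kHz waveform of shape (1, n):
#
#     >>> waveform = torch.rand(1, 16000)
#     >>> feats = mfcc(waveform, num_ceps=13, num_mel_bins=23, sample_frequency=16000.0)
#     >>> feats.shape   # (m, 13)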
def _get_LR_indices_and_weights(orig_freq: float,
new_freq: float,
output_samples_in_unit: int,
window_width: float,
lowpass_cutoff: float,
lowpass_filter_width: int,
device: torch.device,
                                dtype: torch.dtype) -> Tuple[Tensor, Tensor]:
r"""Based on LinearResample::SetIndexesAndWeights where it retrieves the weights for
resampling as well as the indices in which they are valid. LinearResample (LR) means
that the output signal is at linearly spaced intervals (i.e the output signal has a
frequency of ``new_freq``). It uses sinc/bandlimited interpolation to upsample/downsample
the signal.
    The reason the same filter is not used for multiple convolutions is that the
    sinc function can be sampled at different points in time. For example, suppose
a signal is sampled at the timestamps (seconds)
0 16 32
and we want it to be sampled at the timestamps (seconds)
0 5 10 15 20 25 30 35
at the timestamp of 16, the delta timestamps are
16 11 6 1 4 9 14 19
at the timestamp of 32, the delta timestamps are
        32 27 22 17 12 7 2 3
As we can see from deltas, the sinc function is sampled at different points of time
assuming the center of the sinc function is at 0, 16, and 32 (the deltas [..., 6, 1, 4, ....]
for 16 vs [...., 2, 3, ....] for 32)
    For example, when ``orig_freq`` and ``new_freq`` are multiples of each other, only one
    filter is needed.
    A windowed filter function (i.e. Hanning * sinc) is used because the ideal sinc function
    has infinite support (non-zero for all values), so instead it is truncated and multiplied by
    a window function which gives it less-than-perfect rolloff [1].
[1] Chapter 16: Windowed-Sinc Filters, https://www.dspguide.com/ch16/1.htm
Args:
orig_freq (float): The original frequency of the signal
new_freq (float): The desired frequency
output_samples_in_unit (int): The number of output samples in the smallest repeating unit:
num_samp_out = new_freq / Gcd(orig_freq, new_freq)
window_width (float): The width of the window which is nonzero
lowpass_cutoff (float): The filter cutoff in Hz. The filter cutoff needs to be less
than samp_rate_in_hz/2 and less than samp_rate_out_hz/2.
lowpass_filter_width (int): Controls the sharpness of the filter, more == sharper but less
efficient. We suggest around 4 to 10 for normal use
Returns:
(Tensor, Tensor): A tuple of ``min_input_index`` (which is the minimum indices
where the window is valid, size (``output_samples_in_unit``)) and ``weights`` (which is the weights
which correspond with min_input_index, size (``output_samples_in_unit``, ``max_weight_width``)).
"""
assert lowpass_cutoff < min(orig_freq, new_freq) / 2
output_t = torch.arange(0., output_samples_in_unit, device=device, dtype=dtype) / new_freq
min_t = output_t - window_width
max_t = output_t + window_width
min_input_index = torch.ceil(min_t * orig_freq) # size (output_samples_in_unit)
max_input_index = torch.floor(max_t * orig_freq) # size (output_samples_in_unit)
num_indices = max_input_index - min_input_index + 1 # size (output_samples_in_unit)
max_weight_width = num_indices.max()
# create a group of weights of size (output_samples_in_unit, max_weight_width)
j = torch.arange(max_weight_width, device=device, dtype=dtype).unsqueeze(0)
input_index = min_input_index.unsqueeze(1) + j
delta_t = (input_index / orig_freq) - output_t.unsqueeze(1)
weights = torch.zeros_like(delta_t)
inside_window_indices = delta_t.abs().lt(window_width)
# raised-cosine (Hanning) window with width `window_width`
weights[inside_window_indices] = 0.5 * (1 + torch.cos(2 * math.pi * lowpass_cutoff /
lowpass_filter_width * delta_t[inside_window_indices]))
t_eq_zero_indices = delta_t.eq(0.0)
t_not_eq_zero_indices = ~t_eq_zero_indices
# sinc filter function
weights[t_not_eq_zero_indices] *= torch.sin(
2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]) / (math.pi * delta_t[t_not_eq_zero_indices])
# limit of the function at t = 0
weights[t_eq_zero_indices] *= 2 * lowpass_cutoff
weights /= orig_freq # size (output_samples_in_unit, max_weight_width)
return min_input_index, weights
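# Numeric sketch of the sizes above (added note): downsampling 16 kHz to 8 kHz gives
# output_samples_in_unit = 8000 / gcd(16000, 8000) = 1, lowpass_cutoff = 0.99 * 0.5 * 8000 = 3960 Hz
# and window_width = 6 / (2 * 3960) ~= 7.6e-4 s, so min_input_index = ceil(-0.00076 * 16000) = -12,
# max_input_index = 12 and weights has shape (1, 25).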
def _lcm(a: int, b: int) -> int:
return abs(a * b) // math.gcd(a, b)
def _get_num_LR_output_samples(input_num_samp: int,
samp_rate_in: float,
samp_rate_out: float) -> int:
r"""Based on LinearResample::GetNumOutputSamples. LinearResample (LR) means that
the output signal is at linearly spaced intervals (i.e the output signal has a
frequency of ``new_freq``). It uses sinc/bandlimited interpolation to upsample/downsample
the signal.
Args:
input_num_samp (int): The number of samples in the input
samp_rate_in (float): The original frequency of the signal
samp_rate_out (float): The desired frequency
Returns:
int: The number of output samples
"""
# For exact computation, we measure time in "ticks" of 1.0 / tick_freq,
# where tick_freq is the least common multiple of samp_rate_in and
# samp_rate_out.
samp_rate_in = int(samp_rate_in)
samp_rate_out = int(samp_rate_out)
tick_freq = _lcm(samp_rate_in, samp_rate_out)
ticks_per_input_period = tick_freq // samp_rate_in
# work out the number of ticks in the time interval
# [ 0, input_num_samp/samp_rate_in ).
interval_length_in_ticks = input_num_samp * ticks_per_input_period
if interval_length_in_ticks <= 0:
return 0
ticks_per_output_period = tick_freq // samp_rate_out
# Get the last output-sample in the closed interval, i.e. replacing [ ) with
# [ ]. Note: integer division rounds down. See
# http://en.wikipedia.org/wiki/Interval_(mathematics) for an explanation of
# the notation.
last_output_samp = interval_length_in_ticks // ticks_per_output_period
# We need the last output-sample in the open interval, so if it takes us to
# the end of the interval exactly, subtract one.
if last_output_samp * ticks_per_output_period == interval_length_in_ticks:
last_output_samp -= 1
# First output-sample index is zero, so the number of output samples
# is the last output-sample plus one.
num_output_samp = last_output_samp + 1
return num_output_samp
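# Worked example (added note): for 1 second of 16 kHz audio resampled to 8 kHz,
# tick_freq = lcm(16000, 8000) = 16000, interval_length_in_ticks = 16000,
# ticks_per_output_period = 2, last_output_samp = 8000 -> 7999 (open interval),
# so the function returns 8000 output samples.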
def resample_waveform(waveform: Tensor,
orig_freq: float,
new_freq: float,
lowpass_filter_width: int = 6) -> Tensor:
r"""Resamples the waveform at the new frequency. This matches Kaldi's OfflineFeatureTpl ResampleWaveform
which uses a LinearResample (resample a signal at linearly spaced intervals to upsample/downsample
a signal). LinearResample (LR) means that the output signal is at linearly spaced intervals (i.e
the output signal has a frequency of ``new_freq``). It uses sinc/bandlimited interpolation to
upsample/downsample the signal.
https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html
https://github.com/kaldi-asr/kaldi/blob/master/src/feat/resample.h#L56
Args:
waveform (Tensor): The input signal of size (c, n)
orig_freq (float): The original frequency of the signal
new_freq (float): The desired frequency
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. We suggest around 4 to 10 for normal use. (Default: ``6``)
Returns:
Tensor: The waveform at the new frequency
"""
device, dtype = waveform.device, waveform.dtype
assert waveform.dim() == 2
assert orig_freq > 0.0 and new_freq > 0.0
min_freq = min(orig_freq, new_freq)
lowpass_cutoff = 0.99 * 0.5 * min_freq
assert lowpass_cutoff * 2 <= min_freq
base_freq = math.gcd(int(orig_freq), int(new_freq))
input_samples_in_unit = int(orig_freq) // base_freq
output_samples_in_unit = int(new_freq) // base_freq
window_width = lowpass_filter_width / (2.0 * lowpass_cutoff)
first_indices, weights = _get_LR_indices_and_weights(
orig_freq, new_freq, output_samples_in_unit,
window_width, lowpass_cutoff, lowpass_filter_width, device, dtype)
assert first_indices.dim() == 1
    # TODO: figure out a better way to do this. conv1d reaches every element i*stride + padding
# all the weights have the same stride but have different padding.
# Current implementation takes the input and applies the various padding before
# doing a conv1d for that specific weight.
conv_stride = input_samples_in_unit
conv_transpose_stride = output_samples_in_unit
num_channels, wave_len = waveform.size()
window_size = weights.size(1)
tot_output_samp = _get_num_LR_output_samples(wave_len, orig_freq, new_freq)
output = torch.zeros((num_channels, tot_output_samp),
device=device, dtype=dtype)
# eye size: (num_channels, num_channels, 1)
eye = torch.eye(num_channels, device=device, dtype=dtype).unsqueeze(2)
for i in range(first_indices.size(0)):
wave_to_conv = waveform
first_index = int(first_indices[i].item())
if first_index >= 0:
# trim the signal as the filter will not be applied before the first_index
wave_to_conv = wave_to_conv[..., first_index:]
# pad the right of the signal to allow partial convolutions meaning compute
# values for partial windows (e.g. end of the window is outside the signal length)
max_unit_index = (tot_output_samp - 1) // output_samples_in_unit
end_index_of_last_window = max_unit_index * conv_stride + window_size
current_wave_len = wave_len - first_index
right_padding = max(0, end_index_of_last_window + 1 - current_wave_len)
left_padding = max(0, -first_index)
if left_padding != 0 or right_padding != 0:
wave_to_conv = torch.nn.functional.pad(wave_to_conv, (left_padding, right_padding))
conv_wave = torch.nn.functional.conv1d(
wave_to_conv.unsqueeze(0), weights[i].repeat(num_channels, 1, 1),
stride=conv_stride, groups=num_channels)
# we want conv_wave[:, i] to be at output[:, i + n*conv_transpose_stride]
dilated_conv_wave = torch.nn.functional.conv_transpose1d(
conv_wave, eye, stride=conv_transpose_stride).squeeze(0)
# pad dilated_conv_wave so it reaches the output length if needed.
        dilated_conv_wave_len = dilated_conv_wave.size(-1)
        left_padding = i
        right_padding = max(0, tot_output_samp - (left_padding + dilated_conv_wave_len))
dilated_conv_wave = torch.nn.functional.pad(
dilated_conv_wave, (left_padding, right_padding))[..., :tot_output_samp]
output += dilated_conv_wave
return output
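# Illustrative usage sketch (added commentary, not part of the original source):
#
#     >>> waveform = torch.rand(1, 16000)                        # hypothetical 1 s at 16 kHz
#     >>> downsampled = resample_waveform(waveform, 16000., 8000.)
#     >>> downsampled.shape                                      # torch.Size([1, 8000])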
| [
"torch.rfft",
"torch.cat",
"torch.gt",
"torch.finfo",
"torch.le",
"torch.ones",
"torch.eye",
"torch.lt",
"torch.nn.functional.pad",
"torch.flip",
"torch.ceil",
"torch.hamming_window",
"torch.zeros_like",
"torch.nn.functional.conv_transpose1d",
"torch.empty",
"torch.ge",
"torch.zeros",
"torch.cos",
"torch.min",
"torch.hann_window",
"torch.rand",
"torch.arange",
"torch.sin",
"torch.floor",
"torch.mean",
"torch.empty_like"
] | 1.4.0 | sdarkhovsky/audio | c388ec2b5e6b4d0b99f9c5274d597858e90f5789 |
1.0 | """Classes for specifying probe pytorch modules."""
import torch.nn as nn
import torch
class Probe(nn.Module):
pass
class TwoWordPSDProbe(Probe):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
    def __init__(self, model_dim, probe_rank=1024):
print('Constructing TwoWordPSDProbe')
super(TwoWordPSDProbe, self).__init__()
# self.args = args
self.probe_rank = probe_rank # this is the default setting in https://github.com/john-hewitt/structural-probes/blob/4c2e265d6b/example/demo-bert.yaml
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
# self.to(args['device'])
def forward(self, batch):
""" Computes all n^2 pairs of distances after projection
for each sentence in a batch.
Note that due to padding, some distances will be non-zero for pads.
Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of distances of shape (batch_size, max_seq_len, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
batchlen, seqlen, rank = transformed.size()
transformed = transformed.unsqueeze(2)
transformed = transformed.expand(-1, -1, seqlen, -1)
transposed = transformed.transpose(1, 2)
diffs = transformed - transposed
squared_diffs = diffs.pow(2)
squared_distances = torch.sum(squared_diffs, -1)
return squared_distances
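# Illustrative usage sketch (added commentary, not part of the original file); the sizes are
# hypothetical, e.g. BERT-base style 768-dimensional word representations:
#
#     >>> probe = TwoWordPSDProbe(model_dim=768)
#     >>> batch = torch.rand(2, 10, 768)        # (batch_size, max_seq_len, representation_dim)
#     >>> probe(batch).shape                    # torch.Size([2, 10, 10]) of squared distances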
class OneWordPSDProbe(Probe):
""" Computes squared L2 norm of words after projection by a matrix."""
def __init__(self, args):
print('Constructing OneWordPSDProbe')
super(OneWordPSDProbe, self).__init__()
self.args = args
self.probe_rank = args['probe']['maximum_rank']
self.model_dim = args['model']['hidden_dim']
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
self.to(args['device'])
def forward(self, batch):
""" Computes all n depths after projection
for each sentence in a batch.
Computes (Bh_i)^T(Bh_i) for all i
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of depths of shape (batch_size, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
batchlen, seqlen, rank = transformed.size()
norms = torch.bmm(transformed.view(batchlen * seqlen, 1, rank),
transformed.view(batchlen * seqlen, rank, 1))
norms = norms.view(batchlen, seqlen)
return norms
class OneWordNonPSDProbe(Probe):
"""Computes a bilinear affinity between each word representation and itself.
This is different from the probes in A Structural Probe... as the
matrix in the quadratic form is not guaranteed positive semi-definite
"""
def __init__(self, args):
print('Constructing OneWordNonPSDProbe')
super(OneWordNonPSDProbe, self).__init__()
self.args = args
self.model_dim = args['model']['hidden_dim']
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.model_dim))
nn.init.uniform_(self.proj, -0.05, 0.05)
self.to(args['device'])
def forward(self, batch):
""" Computes all n depths after projection
for each sentence in a batch.
Computes (h_i^T)A(h_i) for all i
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of depths of shape (batch_size, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
batchlen, seqlen, rank = batch.size()
norms = torch.bmm(transformed.view(batchlen * seqlen, 1, rank),
batch.view(batchlen * seqlen, rank, 1))
norms = norms.view(batchlen, seqlen)
return norms
class TwoWordNonPSDProbe(Probe):
""" Computes a bilinear function of difference vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, args):
print('TwoWordNonPSDProbe')
super(TwoWordNonPSDProbe, self).__init__()
self.args = args
self.probe_rank = args['probe']['maximum_rank']
self.model_dim = args['model']['hidden_dim']
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.model_dim))
nn.init.uniform_(self.proj, -0.05, 0.05)
self.to(args['device'])
def forward(self, batch):
""" Computes all n^2 pairs of difference scores
for each sentence in a batch.
Note that due to padding, some distances will be non-zero for pads.
Computes (h_i-h_j)^TA(h_i-h_j) for all i,j
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of scores of shape (batch_size, max_seq_len, max_seq_len)
"""
batchlen, seqlen, rank = batch.size()
batch_square = batch.unsqueeze(2).expand(batchlen, seqlen, seqlen, rank)
diffs = (batch_square - batch_square.transpose(1, 2)).view(batchlen * seqlen * seqlen, rank)
psd_transformed = torch.matmul(diffs, self.proj).view(batchlen * seqlen * seqlen, 1, rank)
dists = torch.bmm(psd_transformed, diffs.view(batchlen * seqlen * seqlen, rank, 1))
dists = dists.view(batchlen, seqlen, seqlen)
return dists | [
"torch.zeros",
"torch.nn.init.uniform_",
"torch.matmul",
"torch.sum"
] | 1.0.0 | muziyongshixin/pytorch_SSRP | e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 |
3 | import argparse
import os
import os.path as osp
import time
import numpy as np
import open3d as o3d
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
from tqdm import tqdm
from ES import Searcher, Critic, Actor, apply_transform, matrix2vectors
from pose_check.models.uninet_mt import UniNet_MT_V2
from pose_generation.models.vnet import VNet
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Test UCSNet.')
parser.add_argument('--root_path', type=str, help='path to root directory.')
parser.add_argument('--test_list', type=str, default='./pose_generation/dataset/test_list.txt')
parser.add_argument('--save_path', type=str, help='path to save depth maps.')
parser.add_argument('--real_data', action='store_true')
parser.add_argument('--render_ply', action='store_true')
parser.add_argument('--filter', action='store_true')
#test parameters
parser.add_argument('--generator_ckpt', type=str, help='the path for pre-trained model.',
default='./checkpoints/')
parser.add_argument('--stable_critic_ckpt', type=str,
default='./checkpoints/')
parser.add_argument('--pose_num', type=int, default=16)
parser.add_argument('--rot_rp', type=str, default='6d')
parser.add_argument('--z_dim', type=int, default=3)
parser.add_argument('--num_iter', type=int, default=2)
parser.add_argument('--device', type=str, default='cuda')
args = parser.parse_args()
def read_ply(path, pc_len):
pcd = o3d.io.read_point_cloud(path)
point_cloud = np.asarray(pcd.points)
colors = np.asarray(pcd.colors)
if len(point_cloud) < pc_len:
ind = np.random.choice(len(point_cloud), pc_len-len(point_cloud))
point_cloud = np.concatenate([point_cloud, point_cloud[ind]], 0)
elif len(point_cloud) > pc_len:
ind = np.random.choice(len(point_cloud), pc_len)
point_cloud = point_cloud[ind]
return point_cloud
def load_mv_ply(path, num_v=2, pc_len=1024):
assert num_v <= 4
pcs = []
for i in range(num_v):
dir_name = osp.dirname(path)
base_name = osp.basename(path).split('.')[0]+'.v{:04d}.ply'.format(i)
path_i = osp.join(dir_name, base_name)
pcs.append(read_ply(path_i, pc_len))
point_cloud = np.concatenate(pcs, 0)
if len(point_cloud) < pc_len:
ind = np.random.choice(len(point_cloud), pc_len-len(point_cloud))
point_cloud = np.concatenate([point_cloud, point_cloud[ind]], 0)
elif len(point_cloud) > pc_len:
ind = np.random.choice(len(point_cloud), pc_len)
point_cloud = point_cloud[ind]
return point_cloud
def write_ply(points, colors, save_path):
if colors.max() > 1:
div_ = 255.
else:
div_ = 1.
dir_name = osp.dirname(save_path)
os.makedirs(dir_name, exist_ok=True)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
pcd.colors = o3d.utility.Vector3dVector(colors / div_)
o3d.io.write_point_cloud(save_path, pcd, write_ascii=False)
def load_data(data_root, data_list, pc_len=1024, is_real=True):
for subject_names in data_list:
subjects = []
for name in subject_names:
sub_path = '{}/{}.ply'.format(data_root, name)
if is_real:
subject_ply = read_ply(sub_path, pc_len=pc_len)
else:
subject_ply = load_mv_ply(sub_path, pc_len=pc_len, num_v=2)
print('pc shape: ', subject_ply.shape)
subject_tensor = torch.from_numpy(subject_ply).float().to(args.device) # (N, 3)
subjects.append(subject_tensor)
yield subjects
def main(args):
# build model
# support = 0, object = 1, mask shape (B, 4, N)
stable_critic = UniNet_MT_V2(mask_channel=True, only_test=True)
generator = VNet(mask_channel=False, rot_rep=args.rot_rp,
z_dim=args.z_dim, obj_feat=128, sup_feat=128, z_feat=64,
only_test=True)
# load checkpoint file specified by args.loadckpt
print("Loading model {} ...".format(args.generator_ckpt))
g_state_dict = torch.load(args.generator_ckpt, map_location=torch.device("cpu"))
generator.load_state_dict(g_state_dict['model'], strict=True)
print('Success!')
print("Loading model {} ...".format(args.stable_critic_ckpt))
s_state_dict = torch.load(args.stable_critic_ckpt, map_location=torch.device("cpu"))
stable_critic.load_state_dict(s_state_dict['model'], strict=True)
print('Success!')
generator = nn.DataParallel(generator)
generator.to(args.device)
generator.eval()
stable_critic = nn.DataParallel(stable_critic)
stable_critic.to(args.device)
stable_critic.eval()
critic = Critic(stable_critic, device=args.device, mini_batch=64, use_filter=args.filter)
actor = Actor(generator, device=args.device, z_dim=args.z_dim, batch_size=1)
data_list = open(args.test_list, 'r').readlines()
data_list = list(map(lambda x: str(x).strip().split('-'), data_list))
data_loader = load_data(args.root_path, data_list,
pc_len=1024, is_real=args.real_data)
for j, candidates in enumerate(tqdm(data_loader)):
pair_id = '-'.join(data_list[j])
print('Processing {} ...'.format(pair_id))
solutions = search_solution(candidates=candidates,
actor=actor, critic=critic,
centralize=True,
num_iter=args.num_iter,
n_samp=args.pose_num,
)
print('Total solutions: ', len(solutions))
save_predictions(candidates, solutions, pair_id, render_ply=args.render_ply)
del candidates
del solutions
torch.cuda.empty_cache()
def post_refine(support_ply, object_ply, init_transform, critic, num_iter=2):
'''
:param support_ply: (N, 3)
:param object_ply: (N, 3)
:param init_transform: (B, 4, 4)
:param critic:
:return:
'''
if num_iter == 0:
init_transform_6d = matrix2vectors(init_transform)
scores = critic(tr6d=init_transform_6d,
support_ply=support_ply,
object_ply=object_ply)
return init_transform, scores
cem_searcher = Searcher(action_dim=6, pop_size=4, parents=2, sigma_init=1e-4,
clip=0.003, damp=0.001, damp_limit=0.00001, device=init_transform.device)
refined_transforms, scores = cem_searcher.search(action_init=init_transform,
support_ply=support_ply,
object_ply=object_ply,
critic=critic,
n_iter=num_iter,
)
return refined_transforms, scores
def search_solution(candidates, actor, critic, centralize, num_iter=2, n_samp=64,):
solutions = []
def dfs(support, layer_id, actions=[]):
if layer_id >= len(candidates):
return
selected = candidates[layer_id] # (N, 3)
tic = time.time()
if centralize:
assert support.shape[1] == 3
assert len(support.shape) == 2
assert selected.shape[1] == 3
assert len(selected.shape) == 2
sup_cent = torch.zeros((1, 3), device=support.device,
dtype=support.dtype)
sup_cent[0, :2] = torch.mean(support, 0, keepdim=True)[0, :2]
sup_cent[0, 2] = torch.min(support, 0, keepdim=True)[0][0, 2]
obj_cent = torch.zeros((1, 3), device=selected.device,
dtype=selected.dtype)
obj_cent[0, :2] = torch.mean(selected, 0, keepdim=True)[0, :2]
obj_cent[0, 2] = torch.min(selected, 0, keepdim=True)[0][0, 2]
support -= sup_cent
selected -= obj_cent
# write_ply(support, np.zeros_like(support), './debug_support.ply')
# write_ply(selected, np.zeros_like(selected), './debug_object.ply')
proposals = actor(support, selected, n_samp=n_samp) # (M, 4, 4)
print('# Time [actor]: {:.2f}'.format(time.time() - tic))
tic = time.time()
proposals, scores = post_refine(support, selected, proposals, critic,
num_iter=num_iter) # (M, 4, 4), (M, )
print('# Time [post refine]: {:.2f}'.format(time.time() - tic))
if centralize:
support += sup_cent
selected += obj_cent
base2cent = torch.eye(4, dtype=proposals.dtype, device=proposals.device).view((1, 4, 4))
base2cent[0, :3, 3] = -obj_cent[0, :3]
cent2base = torch.eye(4, dtype=proposals.dtype, device=proposals.device).view((1, 4, 4))
cent2base[0, :3, 3] = sup_cent[0, :3]
proposals = cent2base @ (proposals @ base2cent)
print('layer {} scores: '.format(layer_id), scores)
# proposals = proposals[scores >= 0.5]
# scores = scores[scores >= 0.5]
print('search layer {}, keep nodes: '.format(layer_id), proposals.shape, scores.shape)
for action_i, score_i in zip(proposals, scores):
actions.append((action_i.detach(), score_i.detach()))
if layer_id == len(candidates)-1:
# collect action seq
solutions.append(actions.copy())
else:
selected_t = apply_transform(action_i, selected) # (N, 3)
next_support = torch.cat([support, selected_t], ) # (2*N, 3)
dfs(next_support, layer_id+1, actions)
actions.pop()
with torch.no_grad():
# [s, o_i, ...]
dfs(candidates[0], 1, [])
return solutions
def save_predictions(candidates, solutions, pair_id, render_ply):
    save_dir = osp.join(args.save_path, pair_id)
    os.makedirs(save_dir, exist_ok=True)
    for ind, solution in enumerate(solutions):
        save_one_pair(candidates, solution, save_dir, ind, render_ply=render_ply)
def save_one_pair(point_clouds, solution, save_dir, index, render_ply=True):
t2n = lambda x: x.detach().cpu().numpy()
colors = [[20, 20, 160], [20, 160, 200]]
scores = ['{:.2f}'.format((np.round(x[1].item(), 2))) for x in solution]
transforms = [x[0] for x in solution]
file_name = '_'.join(scores) + '_{:04d}.ply'.format(index)
transforms_np = list(map(t2n, transforms))
mat_name = file_name.replace('.ply', '.npy')
np.save(osp.join(save_dir, mat_name), transforms_np)
if not render_ply:
return
assert len(transforms) + 1 == len(point_clouds)
assert len(point_clouds) == len(colors)
ret = [point_clouds[0], ]
for i in range(len(point_clouds)-1):
subject_i = apply_transform(transforms[i], point_clouds[i+1])
ret.append(subject_i)
ply_buffers = []
for i in range(len(ret)):
points = t2n(ret[i])
color = np.ones((len(points), 1)) @ np.array(colors[i]).reshape((1, 3))
subject_ply = np.concatenate([points, color], 1) # (N, 6)
ply_buffers.append(subject_ply)
full_ply = np.concatenate(ply_buffers, 0)
write_ply(full_ply[:, :3], full_ply[:, 3:], osp.join(save_dir, file_name))
if __name__ == '__main__':
with torch.no_grad():
main(args) | [
"torch.device",
"torch.zeros",
"torch.cat",
"torch.min",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.empty_cache",
"torch.eye",
"torch.mean",
"torch.nn.DataParallel"
] | 3 | touristCheng/Learning2Regrasp | 2823c8da5506bcf7d6328976a1e1e7ede84d90cb |
1.1 | import torch
from torchio import ScalarImage, RandomAnisotropy
from ...utils import TorchioTestCase
class TestRandomAnisotropy(TorchioTestCase):
"""Tests for `RandomAnisotropy`."""
def test_downsample(self):
transform = RandomAnisotropy(
axes=1,
downsampling=(2., 2.)
)
transformed = transform(self.sample_subject)
self.assertEqual(
self.sample_subject.spacing[1],
transformed.spacing[1],
)
def test_out_of_range_axis(self):
with self.assertRaises(ValueError):
RandomAnisotropy(axes=3)
def test_out_of_range_axis_in_tuple(self):
with self.assertRaises(ValueError):
RandomAnisotropy(axes=(0, -1, 2))
def test_wrong_axes_type(self):
with self.assertRaises(ValueError):
RandomAnisotropy(axes='wrong')
def test_wrong_downsampling_type(self):
with self.assertRaises(ValueError):
RandomAnisotropy(downsampling='wrong')
def test_below_one_downsampling(self):
with self.assertRaises(ValueError):
RandomAnisotropy(downsampling=0.2)
def test_2d_rgb(self):
image = ScalarImage(tensor=torch.rand(3, 4, 5, 6))
RandomAnisotropy()(image)
| [
"torch.rand"
] | 1.1 | Linardos/torchio | b0555fc939960128d37e56c27edcfc74a3a967e3 |
1.1 | from collections import defaultdict
from typing import Tuple, Union, Dict
import torch
import numpy as np
from ....data.subject import Subject
from ... import IntensityTransform, FourierTransform
from .. import RandomTransform
class RandomSpike(RandomTransform, IntensityTransform, FourierTransform):
r"""Add random MRI spike artifacts.
Also known as `Herringbone artifact
<https://radiopaedia.org/articles/herringbone-artifact?lang=gb>`_,
crisscross artifact or corduroy artifact, it creates stripes in different
directions in image space due to spikes in k-space.
Args:
num_spikes: Number of spikes :math:`n` present in k-space.
If a tuple :math:`(a, b)` is provided, then
:math:`n \sim \mathcal{U}(a, b) \cap \mathbb{N}`.
If only one value :math:`d` is provided,
:math:`n \sim \mathcal{U}(0, d) \cap \mathbb{N}`.
Larger values generate more distorted images.
intensity: Ratio :math:`r` between the spike intensity and the maximum
of the spectrum.
If a tuple :math:`(a, b)` is provided, then
:math:`r \sim \mathcal{U}(a, b)`.
If only one value :math:`d` is provided,
:math:`r \sim \mathcal{U}(-d, d)`.
Larger values generate more distorted images.
**kwargs: See :class:`~torchio.transforms.Transform` for additional
keyword arguments.
.. note:: The execution time of this transform does not depend on the
number of spikes.
"""
def __init__(
self,
num_spikes: Union[int, Tuple[int, int]] = 1,
intensity: Union[float, Tuple[float, float]] = (1, 3),
**kwargs
):
super().__init__(**kwargs)
self.intensity_range = self._parse_range(
intensity, 'intensity_range')
self.num_spikes_range = self._parse_range(
num_spikes, 'num_spikes', min_constraint=0, type_constraint=int)
def apply_transform(self, subject: Subject) -> Subject:
arguments = defaultdict(dict)
for image_name in self.get_images_dict(subject):
spikes_positions_param, intensity_param = self.get_params(
self.num_spikes_range,
self.intensity_range,
)
arguments['spikes_positions'][image_name] = spikes_positions_param
arguments['intensity'][image_name] = intensity_param
transform = Spike(**self.add_include_exclude(arguments))
transformed = transform(subject)
return transformed
def get_params(
self,
num_spikes_range: Tuple[int, int],
intensity_range: Tuple[float, float],
) -> Tuple[np.ndarray, float]:
ns_min, ns_max = num_spikes_range
num_spikes_param = torch.randint(ns_min, ns_max + 1, (1,)).item()
intensity_param = self.sample_uniform(*intensity_range)
spikes_positions = torch.rand(num_spikes_param, 3).numpy()
return spikes_positions, intensity_param.item()
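# Illustrative usage sketch (added commentary, not part of the original torchio source),
# assuming `subject` is an existing torchio Subject:
#
#     >>> transform = RandomSpike(num_spikes=3, intensity=(1, 3))
#     >>> transformed = transform(subject)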
class Spike(IntensityTransform, FourierTransform):
r"""Add MRI spike artifacts.
Also known as `Herringbone artifact
<https://radiopaedia.org/articles/herringbone-artifact?lang=gb>`_,
crisscross artifact or corduroy artifact, it creates stripes in different
directions in image space due to spikes in k-space.
Args:
spikes_positions:
intensity: Ratio :math:`r` between the spike intensity and the maximum
of the spectrum.
**kwargs: See :class:`~torchio.transforms.Transform` for additional
keyword arguments.
.. note:: The execution time of this transform does not depend on the
number of spikes.
"""
def __init__(
self,
spikes_positions: Union[np.ndarray, Dict[str, np.ndarray]],
intensity: Union[float, Dict[str, float]],
**kwargs
):
super().__init__(**kwargs)
self.spikes_positions = spikes_positions
self.intensity = intensity
self.args_names = 'spikes_positions', 'intensity'
self.invert_transform = False
def apply_transform(self, subject: Subject) -> Subject:
spikes_positions = self.spikes_positions
intensity = self.intensity
for image_name, image in self.get_images_dict(subject).items():
if self.arguments_are_dict():
spikes_positions = self.spikes_positions[image_name]
intensity = self.intensity[image_name]
transformed_tensors = []
for channel in image.data:
transformed_tensor = self.add_artifact(
channel,
spikes_positions,
intensity,
)
transformed_tensors.append(transformed_tensor)
image.set_data(torch.stack(transformed_tensors))
return subject
def add_artifact(
self,
tensor: torch.Tensor,
spikes_positions: np.ndarray,
intensity_factor: float,
):
array = np.asarray(tensor)
spectrum = self.fourier_transform(array)
shape = np.array(spectrum.shape)
mid_shape = shape // 2
indices = np.floor(spikes_positions * shape).astype(int)
for index in indices:
diff = index - mid_shape
i, j, k = mid_shape + diff
artifact = spectrum.max() * intensity_factor
if self.invert_transform:
spectrum[i, j, k] -= artifact
else:
spectrum[i, j, k] += artifact
# If we wanted to add a pure cosine, we should add spikes to both
# sides of k-space. However, having only one is a better
            # representation of the actual cause of the artifact in real
# scans. Therefore the next two lines have been removed.
# #i, j, k = mid_shape - diff
# #spectrum[i, j, k] = spectrum.max() * intensity_factor
result = np.real(self.inv_fourier_transform(spectrum))
return torch.from_numpy(result.astype(np.float32))
| [
"torch.rand",
"torch.stack",
"torch.randint"
] | 1.1 | Linardos/torchio | b0555fc939960128d37e56c27edcfc74a3a967e3 |
1.10 | from sklearn.model_selection import StratifiedKFold
import os, sys
# DECLARE HOW MANY GPUS YOU WISH TO USE.
# KAGGLE ONLY HAS 1, BUT OFFLINE, YOU CAN USE MORE
import argparse
def get_args():
parser = argparse.ArgumentParser()
#parser.add_argument('--disc_type', type=int, default=0, help='disc_type')
parser.add_argument('--fold', type=int, default=0, help='fold')
parser.add_argument('--gpu_id', type=str, default='0', help='gpu_id')
opts = parser.parse_args()
return opts
args=get_args()
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu_id) #0,1,2,3 for four gpu
out_dir='seqclassifiers_v3'
os.system(f'mkdir {out_dir}')
# VERSION FOR SAVING MODEL WEIGHTS
VER=26
# IF VARIABLE IS NONE, THEN NOTEBOOK COMPUTES TOKENS
# OTHERWISE NOTEBOOK LOADS TOKENS FROM PATH
LOAD_TOKENS_FROM = '../../input/py-bigbird-v26'
# IF VARIABLE IS NONE, THEN NOTEBOOK TRAINS A NEW MODEL
# OTHERWISE IT LOADS YOUR PREVIOUSLY TRAINED MODEL
LOAD_MODEL_FROM = 'models'
# Use only the model trained for this fold (a single-model "ensemble").
ENSEMBLE_IDS = [args.fold]
# Setting Fold = None leaves out an arbitrary 10% of the dataset for sequence classifier training.
# Setting Fold to one of [0,1,2,3,4] leaves out the portion of the dataset not trained on by the corresponding ensemble model.
# 'half' leaves out an arbitrary 50%.
FOLD = args.fold
# print(FOLD)
# exit()
# IF FOLLOWING IS NONE, THEN NOTEBOOK
# USES INTERNET AND DOWNLOADS HUGGINGFACE
# CONFIG, TOKENIZER, AND MODEL
DOWNLOADED_MODEL_PATH = '../../input/deberta-xlarge/'
if DOWNLOADED_MODEL_PATH is None:
DOWNLOADED_MODEL_PATH = 'model'
MODEL_NAME = 'allenai/longformer-large-4096'
# Tune the probability threshold for sequence classifiers to maximize F1
TRAIN_SEQ_CLASSIFIERS = False
KAGGLE_CACHE = 'cache' #location of valid_pred files
cache = 'cache' #save location of valid_seqds files
cacheExists = os.path.exists(cache)
if not cacheExists:
os.makedirs(cache)
print(ENSEMBLE_IDS)
# In[90]:
# skopt optimizer has a bug when scipy is installed with its default version
if TRAIN_SEQ_CLASSIFIERS:
os.system('pip install --no-dependencies scipy==1.5.2 ')
# In[91]:
from torch import cuda
config = {'model_name': MODEL_NAME,
'max_length': 2048,
'train_batch_size':4,
'valid_batch_size':1,
'epochs':5,
'learning_rates': [2.5e-5, 2.5e-5, 2.5e-6, 2.5e-6, 2.5e-7],
'max_grad_norm':10,
'device': 'cuda' if cuda.is_available() else 'cpu'}
# # How To Submit PyTorch Without Internet
# Many people ask me: how do I submit PyTorch models without internet? With HuggingFace Transformers, it's easy. Just download the following three things: (1) model weights, (2) tokenizer files, (3) config file, and upload them to a Kaggle dataset. The code below shows how to get the files from HuggingFace for Google's BigBird-base, but the same code can download any transformer, for example roberta-base.
# In[92]:
from transformers import *
if DOWNLOADED_MODEL_PATH == 'model':
os.mkdir('model')
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, add_prefix_space=True)
tokenizer.save_pretrained('model')
config_model = AutoConfig.from_pretrained(MODEL_NAME)
config_model.num_labels = 15
config_model.save_pretrained('model')
backbone = AutoModelForTokenClassification.from_pretrained(MODEL_NAME,
config=config_model)
backbone.save_pretrained('model')
# # Load Data and Libraries
# In addition to loading the train dataframe, we will load all the train and text files and save them in a dataframe.
# In[93]:
import numpy as np, os
from scipy import stats
import pandas as pd, gc
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification, AdamW
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.metrics import accuracy_score
from torch.cuda import amp
# In[94]:
train_df = pd.read_csv('../../input/feedback-prize-2021/train.csv')
print( train_df.shape )
train_df.head()
# In[95]:
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, test_texts = [], []
for f in list(os.listdir('../../input/feedback-prize-2021/test')):
test_names.append(f.replace('.txt', ''))
test_texts.append(open('../../input/feedback-prize-2021/test/' + f, 'r').read())
test_texts = pd.DataFrame({'id': test_names, 'text': test_texts})
test_texts['len']=test_texts['text'].apply(lambda x:len(x.split()))
test_texts=test_texts.sort_values(by=['len']).reset_index()
test_texts
SUBMISSION = False
if len(test_names) > 5:
SUBMISSION = True
test_texts.head()
# In[96]:
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, train_texts = [], []
for f in tqdm(list(os.listdir('../../input/feedback-prize-2021/train'))):
test_names.append(f.replace('.txt', ''))
train_texts.append(open('../../input/feedback-prize-2021/train/' + f, 'r').read())
train_text_df = pd.DataFrame({'id': test_names, 'text': train_texts})
train_text_df.head()
# # Convert Train Text to NER Labels
# We will now convert all text words into NER labels and save in a dataframe.
# In[97]:
if not LOAD_TOKENS_FROM:
all_entities = []
for ii,i in enumerate(train_text_df.iterrows()):
if ii%100==0: print(ii,', ',end='')
        total = len(i[1]['text'].split())
entities = ["O"]*total
for j in train_df[train_df['id'] == i[1]['id']].iterrows():
discourse = j[1]['discourse_type']
list_ix = [int(x) for x in j[1]['predictionstring'].split(' ')]
entities[list_ix[0]] = f"B-{discourse}"
for k in list_ix[1:]: entities[k] = f"I-{discourse}"
all_entities.append(entities)
train_text_df['entities'] = all_entities
train_text_df.to_csv('train_NER.csv',index=False)
else:
from ast import literal_eval
train_text_df = pd.read_csv(f'{LOAD_TOKENS_FROM}/train_NER.csv')
# pandas saves lists as string, we must convert back
train_text_df.entities = train_text_df.entities.apply(lambda x: literal_eval(x) )
print( train_text_df.shape )
train_text_df.head()
# In[98]:
# CREATE DICTIONARIES THAT WE CAN USE DURING TRAIN AND INFER
output_labels = ['O', 'B-Lead', 'I-Lead', 'B-Position', 'I-Position', 'B-Claim', 'I-Claim', 'B-Counterclaim', 'I-Counterclaim',
'B-Rebuttal', 'I-Rebuttal', 'B-Evidence', 'I-Evidence', 'B-Concluding Statement', 'I-Concluding Statement']
labels_to_ids = {v:k for k,v in enumerate(output_labels)}
ids_to_labels = {k:v for k,v in enumerate(output_labels)}
disc_type_to_ids = {'Evidence':(11,12),'Claim':(5,6),'Lead':(1,2),'Position':(3,4),'Counterclaim':(7,8),'Rebuttal':(9,10),'Concluding Statement':(13,14)}
# In[99]:
labels_to_ids
# # Define the dataset function
# Below is our PyTorch dataset function. It always outputs tokens and attention masks. During training it also provides labels, and during inference it also provides word ids to help convert token predictions into word predictions.
#
# Note that we use `text.split()` and `is_split_into_words=True` when we convert train text to labeled train tokens. This is how the HuggingFace tutorial does it. However, this removes characters like `\n` (new paragraph). If you want your model to see new paragraphs, we need to map words to tokens ourselves using `return_offsets_mapping=True`. See my TensorFlow notebook [here][1] for an example.
#
# Some of the following code comes from the example at HuggingFace [here][2]. However, I think the code at that link is wrong; the HuggingFace original code is [here][3]. With the flag `LABEL_ALL` we can either label just the first subword token (when one word has more than one subword token), or label all the subword tokens (with the word's label). In this notebook version, we label all the tokens. There is a Kaggle discussion [here][4].
#
# [1]: https://www.kaggle.com/cdeotte/tensorflow-longformer-ner-cv-0-617
# [2]: https://huggingface.co/docs/transformers/custom_datasets#tok_ner
# [3]: https://github.com/huggingface/transformers/blob/86b40073e9aee6959c8c85fcba89e47b432c4f4d/examples/pytorch/token-classification/run_ner.py#L371
# [4]: https://www.kaggle.com/c/feedback-prize-2021/discussion/296713
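# A small, hypothetical illustration (not in the original notebook) of what
# `return_offsets_mapping=True` provides. It assumes any HuggingFace *fast* tokenizer;
# 'roberta-base' is only a placeholder name. Each token comes with (start, end) character
# offsets into the raw text, which is what lets us map tokens back to `text.split()` words
# while keeping characters such as '\n' that `is_split_into_words=True` would drop.
#
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained('roberta-base', add_prefix_space=True)
# enc = tok("Hello\nworld again", return_offsets_mapping=True)
# print(enc.tokens())           # the subword tokens
# print(enc['offset_mapping'])  # (start, end) character span of each token
# print(enc.word_ids())         # tokenizer word index per token (None for special tokens)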
# In[100]:
# Return an array that maps character index to index of word in list of split() words
def split_mapping(unsplit):
splt = unsplit.split()
offset_to_wordidx = np.full(len(unsplit),-1)
txt_ptr = 0
for split_index, full_word in enumerate(splt):
while unsplit[txt_ptr:txt_ptr + len(full_word)] != full_word:
txt_ptr += 1
offset_to_wordidx[txt_ptr:txt_ptr + len(full_word)] = split_index
txt_ptr += len(full_word)
return offset_to_wordidx
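# For example (added note): split_mapping("ab  cd") returns array([0, 0, -1, -1, 1, 1]),
# i.e. characters of the first split() word map to word index 0, whitespace maps to -1,
# and characters of the second word map to word index 1.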
def iter_split(data,labels,fold,nfolds=5,seed=2020):
splits = StratifiedKFold(n_splits=nfolds, random_state=seed, shuffle=True)
splits = list(splits.split(data,labels))
# splits = np.zeros(len(data)).astype(np.int)
# for i in range(nfolds): splits[splits[i][1]] = i
# indices=np.arange(len(data))
train_indices=splits[fold][0]
val_indices=splits[fold][1]
return train_indices, val_indices
# In[101]:
class dataset(Dataset):
def __init__(self, dataframe, tokenizer, max_len, get_wids):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_len = max_len
self.get_wids = get_wids # for validation
def __getitem__(self, index):
# GET TEXT AND WORD LABELS
text = self.data.text[index]
word_labels = self.data.entities[index] if not self.get_wids else None
# TOKENIZE TEXT
encoding = self.tokenizer(text,
return_offsets_mapping=True,
padding=False,
truncation=True,
max_length=self.max_len)
word_ids = encoding.word_ids()
split_word_ids = np.full(len(word_ids),-1)
offset_to_wordidx = split_mapping(text)
offsets = encoding['offset_mapping']
# CREATE TARGETS AND MAPPING OF TOKENS TO SPLIT() WORDS
label_ids = []
# Iterate in reverse to label whitespace tokens until a Begin token is encountered
for token_idx, word_idx in reversed(list(enumerate(word_ids))):
if word_idx is None:
if not self.get_wids: label_ids.append(-100)
else:
if offsets[token_idx][0] != offsets[token_idx][1]:
#Choose the split word that shares the most characters with the token if any
split_idxs = offset_to_wordidx[offsets[token_idx][0]:offsets[token_idx][1]]
split_index = stats.mode(split_idxs[split_idxs != -1]).mode[0] if len(np.unique(split_idxs)) > 1 else split_idxs[0]
if split_index != -1:
if not self.get_wids: label_ids.append( labels_to_ids[word_labels[split_index]] )
split_word_ids[token_idx] = split_index
else:
# Even if we don't find a word, continue labeling 'I' tokens until a 'B' token is found
if label_ids and label_ids[-1] != -100 and ids_to_labels[label_ids[-1]][0] == 'I':
split_word_ids[token_idx] = split_word_ids[token_idx + 1]
if not self.get_wids: label_ids.append(label_ids[-1])
else:
if not self.get_wids: label_ids.append(-100)
else:
if not self.get_wids: label_ids.append(-100)
encoding['labels'] = list(reversed(label_ids))
# CONVERT TO TORCH TENSORS
item = {key: torch.as_tensor(val) for key, val in encoding.items()}
if self.get_wids:
item['wids'] = torch.as_tensor(split_word_ids)
return item
def __len__(self):
return self.len
class CustomCollate:
def __init__(self,tokenizer,sliding_window=None):
self.tokenizer=tokenizer
self.sliding_window=sliding_window
def __call__(self,data):
"""
need to collate: input_ids, attention_mask, labels
input_ids is padded with 1, attention_mask 0, labels -100
"""
bs=len(data)
lengths=[]
for i in range(bs):
lengths.append(len(data[i]['input_ids']))
# print(data[i]['input_ids'].shape)
# print(data[i]['attention_mask'].shape)
# print(data[i]['labels'].shape)
max_len=max(lengths)
if self.sliding_window is not None and max_len > self.sliding_window:
max_len= int((np.floor(max_len/self.sliding_window-1e-6)+1)*self.sliding_window)
#always pad the right side
input_ids, attention_mask, labels, BIO_labels, discourse_labels=[],[],[],[],[]
#if np.random.uniform()>0.5:
#print(data[0].keys())
# print(data[0].keys())
# exit()
#print(max_len)
if 'wids' in data[0]:
get_wids=True
else:
get_wids=False
#print(get_wids)
wids = []
#wids.append(torch.nn.functional.pad(data[i]['wids'],(0,max_len-lengths[i]),value=-1))
for i in range(bs):
input_ids.append(torch.nn.functional.pad(data[i]['input_ids'],(0,max_len-lengths[i]),value=self.tokenizer.pad_token_id))
attention_mask.append(torch.nn.functional.pad(data[i]['attention_mask'],(0,max_len-lengths[i]),value=0))
#labels.append(torch.nn.functional.pad(data[i]['labels'],(0,max_len-lengths[i]),value=-100))
#BIO_labels.append(torch.nn.functional.pad(data[i]['BIO_labels'],(0,max_len-lengths[i]),value=-100))
#discourse_labels.append(torch.nn.functional.pad(data[i]['discourse_labels'],(0,max_len-lengths[i]),value=-100))
if get_wids:
wids.append(torch.nn.functional.pad(data[i]['wids'],(0,max_len-lengths[i]),value=-1))
# else:
# for i in range(bs):
# input_ids.append(torch.nn.functional.pad(data[i]['input_ids'],(max_len-lengths[i],0),value=1))
# attention_mask.append(torch.nn.functional.pad(data[i]['attention_mask'],(max_len-lengths[i],0),value=0))
# labels.append(torch.nn.functional.pad(data[i]['labels'],(max_len-lengths[i],0),value=-100))
input_ids=torch.stack(input_ids)
attention_mask=torch.stack(attention_mask)
#labels=torch.stack(labels)
#BIO_labels=torch.stack(BIO_labels)
#discourse_labels=torch.stack(discourse_labels)
if get_wids:
wids=torch.stack(wids)
#exit()
if get_wids:
return {"input_ids":input_ids,"attention_mask":attention_mask,
"labels":labels,"BIO_labels":BIO_labels,"discourse_labels":discourse_labels,
"wids":wids}
else:
return {"input_ids":input_ids,"attention_mask":attention_mask,
"labels":labels,"BIO_labels":BIO_labels,"discourse_labels":discourse_labels}
# # Create Train and Validation Dataloaders
# We will use the same train and validation subsets as my TensorFlow notebook [here][1]. Then we can compare results. And/or experiment with ensembling the validation fold predictions.
#
# [1]: https://www.kaggle.com/cdeotte/tensorflow-longformer-ner-cv-0-617
# In[102]:
# CHOOSE VALIDATION INDEXES (that match my TF notebook)
IDS = train_df.id.unique()
np.random.seed(42)
if FOLD == 'half':
train_idx = np.random.choice(np.arange(len(IDS)),int(0.5*len(IDS)),replace=False)
valid_idx = np.setdiff1d(np.arange(len(IDS)),train_idx)
elif FOLD == 'full':
train_idx = np.random.choice(np.arange(len(IDS)),int(0.5*len(IDS)),replace=False)
valid_idx = np.arange(len(IDS))
# elif FOLD is not None:
# print('There are',len(IDS),'train texts. We will split 93% 7% for ensemble training.')
# shuffled_ids = np.arange(len(IDS))
# np.random.shuffle(shuffled_ids)
#
# valid_len = int(.07 * len(IDS))
# valid_idx = shuffled_ids[FOLD*valid_len:(FOLD+1)*valid_len]
# train_idx = np.setdiff1d(np.arange(len(IDS)),valid_idx)
else:
    print('There are',len(IDS),'train texts. We will hold out 1 of 8 folds for ensemble training.')
#train_idx = np.random.choice(np.arange(len(IDS)),int(0.9*len(IDS)),replace=False)
#valid_idx = np.setdiff1d(np.arange(len(IDS)),train_idx)
train_idx, valid_idx= iter_split(np.arange(len(IDS)),np.ones(len(IDS)),args.fold,nfolds=8)
TRAIN_IDS=IDS[train_idx]
VAL_IDS=IDS[valid_idx]
# print(len(valid_idx))
# exit()
# print(VAL_IDS)
# exit()
np.random.seed(None)
# In[103]:
# CREATE TRAIN SUBSET AND VALID SUBSET
data = train_text_df[['id','text', 'entities']]
train_dataset = data.loc[data['id'].isin(IDS[train_idx]),['text', 'entities']].reset_index(drop=True)
test_dataset = data.loc[data['id'].isin(IDS[valid_idx])].reset_index(drop=True)
print(test_dataset.id)
# # print(VAL_IDS)
#exit()
print("FULL Dataset: {}".format(data.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
tokenizer = AutoTokenizer.from_pretrained(DOWNLOADED_MODEL_PATH)
training_set = dataset(train_dataset, tokenizer, config['max_length'], False)
testing_set = dataset(test_dataset, tokenizer, config['max_length'], True)
# In[111]:
# TRAIN DATASET AND VALID DATASET
train_params = {'batch_size': config['train_batch_size'],
'shuffle': True,
'num_workers': 2,
'pin_memory':True
}
test_params = {'batch_size': config['valid_batch_size'],
'shuffle': False,
'num_workers': 2,
'pin_memory':True
}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params,collate_fn=CustomCollate(tokenizer))
# TEST DATASET
test_texts_set = dataset(test_texts, tokenizer, config['max_length'], True)
test_texts_loader = DataLoader(test_texts_set, **test_params,collate_fn=CustomCollate(tokenizer))
#exit()
# In[112]:
from transformers import *
import torch.nn as nn
import torch.nn.functional as F
rearrange_indices=[14, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
class ResidualLSTM(nn.Module):
def __init__(self, d_model, rnn='GRU'):
super(ResidualLSTM, self).__init__()
self.downsample=nn.Linear(d_model,d_model//2)
if rnn=='GRU':
self.LSTM=nn.GRU(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)
else:
self.LSTM=nn.LSTM(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)
self.dropout1=nn.Dropout(0.2)
self.norm1= nn.LayerNorm(d_model//2)
self.linear1=nn.Linear(d_model//2, d_model*4)
self.linear2=nn.Linear(d_model*4, d_model)
self.dropout2=nn.Dropout(0.2)
self.norm2= nn.LayerNorm(d_model)
def forward(self, x):
x=x.permute(1,0,2)
res=x
x=self.downsample(x)
x, _ = self.LSTM(x)
x=self.dropout1(x)
x=self.norm1(x)
x=F.relu(self.linear1(x))
x=self.linear2(x)
x=self.dropout2(x)
x=res+x
x=x.permute(1,0,2)
return self.norm2(x)
def noop(x): return x
class ResBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding="same", use_bn=False):
super().__init__()
self.idconv = noop if in_channels == out_channels \
else nn.Conv1d(in_channels, out_channels, 1, stride=1)
if padding == "same":
padding = kernel_size // 2 * dilation
if use_bn:
self.conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, stride=stride, dilation=dilation),
nn.BatchNorm1d(out_channels),
)
else:
self.conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, stride=stride, dilation=dilation),
)
def forward(self, x):
return F.relu(self.conv(x) + self.idconv(x))
class ResNet(nn.Module):
def __init__(self, use_msd=False,
cnn_dim=512, input_dim=1024, kernel_sizes=[3,5,7,9], use_bn=False):
super().__init__()
self.use_msd = use_msd
self.cnn = nn.Sequential(
ResBlock(input_dim, cnn_dim, kernel_size=kernel_sizes[0], use_bn=use_bn),
ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[1], use_bn=use_bn),
# ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[2], use_bn=use_bn),
# ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[3], use_bn=use_bn),
)
self.logits = nn.Linear(cnn_dim, 1024)
self.high_dropout = nn.Dropout(p=0.5)
self.dropout1 = nn.Dropout(p=0.1)
self.dropout2 = nn.Dropout(p=0.1)
def forward(self, x):
x = x.permute(0,2,1)
features = self.cnn(self.dropout1(x)).permute(0, 2, 1) # [Bs x T x nb_ft]
# print(f'features: {features.shape}')
#if self.use_msd and self.training:
features = torch.mean(
torch.stack(
[self.high_dropout(features) for _ in range(5)],
dim=0,
),
dim=0,
)
features=self.logits(features)
# else:
# logits = self.logits(self.dropout2(features))
# print(f'logits: {logits.shape}')
return features
class SlidingWindowTransformerModel(nn.Module):
def __init__(self,DOWNLOADED_MODEL_PATH, rnn='GRU', window_size=512, edge_len=64, no_backbone=False):
super(SlidingWindowTransformerModel, self).__init__()
config_model = AutoConfig.from_pretrained(DOWNLOADED_MODEL_PATH+'/config.json')
self.no_backbone=no_backbone
if no_backbone:
pass
else:
self.backbone=AutoModel.from_pretrained(
DOWNLOADED_MODEL_PATH+'/pytorch_model.bin',config=config_model)
if rnn=="GRU" or rnn=='LSTM':
self.lstm=ResidualLSTM(1024,rnn)
else:
self.lstm=ResNet()
self.classification_head=nn.Linear(1024,15)
self.window_size=window_size
self.edge_len=edge_len
self.inner_len=window_size-edge_len*2
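    # Sliding-window scheme (comment added for clarity): long sequences are encoded in
    # overlapping windows of `window_size` tokens. For every window after the first, the
    # leading `edge_len` tokens only provide context and are discarded, so each window
    # contributes `inner_len` fresh token representations. These are concatenated along
    # the sequence dimension before the RNN/CNN head and the classification layer.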
def forward(self,input_ids,attention_mask,return_transformer_hidden_states=False):
# print(L)
# exit()
#x=self.backbone(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)[0]
if self.no_backbone==False:
B,L=input_ids.shape
if L<=self.window_size:
x=self.backbone(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)[0]
#pass
else:
#print("####")
#print(input_ids.shape)
segments=(L-self.window_size)//self.inner_len
if (L-self.window_size)%self.inner_len>self.edge_len:
segments+=1
elif segments==0:
segments+=1
x=self.backbone(input_ids=input_ids[:,:self.window_size],attention_mask=attention_mask[:,:self.window_size],return_dict=False)[0]
for i in range(1,segments+1):
start=self.window_size-self.edge_len+(i-1)*self.inner_len
end=self.window_size-self.edge_len+(i-1)*self.inner_len+self.window_size
end=min(end,L)
x_next=input_ids[:,start:end]
mask_next=attention_mask[:,start:end]
x_next=self.backbone(input_ids=x_next,attention_mask=mask_next,return_dict=False)[0]
#L_next=x_next.shape[1]-self.edge_len,
if i==segments:
x_next=x_next[:,self.edge_len:]
else:
x_next=x_next[:,self.edge_len:self.edge_len+self.inner_len]
#print(x_next.shape)
x=torch.cat([x,x_next],1)
#print(start,end)
#print(x.shape)
if return_transformer_hidden_states:
transformer_hidden_states=x
x=self.lstm(x)
x=self.classification_head(x)
else:
transformer_hidden_states=input_ids
x=self.lstm(transformer_hidden_states)
x=self.classification_head(x)
if return_transformer_hidden_states:
return [x[:,:,rearrange_indices]], transformer_hidden_states
else:
return [x[:,:,rearrange_indices]]#, BIO_output
model = SlidingWindowTransformerModel(DOWNLOADED_MODEL_PATH).to(config['device'])
import warnings
warnings.filterwarnings('ignore', '.*__floordiv__ is deprecated.*',)
# LOOP TO TRAIN MODEL (or load model)
if not LOAD_MODEL_FROM:
for epoch in range(config['epochs']):
print(f"### Training epoch: {epoch + 1}")
for g in optimizer.param_groups:
g['lr'] = config['learning_rates'][epoch]
lr = optimizer.param_groups[0]['lr']
print(f'### LR = {lr}\n')
train(epoch)
torch.cuda.empty_cache()
gc.collect()
torch.save(model.state_dict(), f'bigbird_v{VER}.pt')
# # Inference and Validation Code
# We will infer in batches using our data loader which is faster than inferring one text at a time with a for-loop. The metric code is taken from Rob Mulla's great notebook [here][2]. Our model achieves validation F1 score 0.615!
#
# During inference our model will make predictions for each subword token. Some single words consist of multiple subword tokens. In the code below, we average the predictions of all subword tokens belonging to a word to get the word-level probabilities. We could try other approaches, like using only the first subword token's prediction or taking `B` labels before `I` labels, etc.
#
# [1]: https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
# [2]: https://www.kaggle.com/robikscube/student-writing-competition-twitch
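# A toy, self-contained sketch (added for illustration) of the token-to-word pooling
# described above: given per-token class probabilities and the word id of each token,
# average the probabilities of all tokens that belong to the same word. The arrays
# below are made up; the real pipeline uses the `wids` produced by the dataset class.
#
# import numpy as np
# token_probs = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])  # 3 tokens, 2 classes
# word_ids    = np.array([0, 0, 1])                             # first two tokens form word 0
# word_probs  = [token_probs[word_ids == w].mean(axis=0) for w in np.unique(word_ids)]
# # word_probs -> [array([0.75, 0.25]), array([0.2, 0.8])]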
# In[115]:
# Returns per-word, mean class prediction probability over all tokens corresponding to each word
def inference(data_loader, model_ids):
gc.collect()
torch.cuda.empty_cache()
ensemble_preds = np.zeros((len(data_loader.dataset), config['max_length'], len(labels_to_ids)), dtype=np.float32)
wids = np.full((len(data_loader.dataset), config['max_length']), -100)
for model_i, model_id in enumerate(model_ids):
model.load_state_dict(torch.load(f'{LOAD_MODEL_FROM}/fold{model_id}.pt', map_location=config['device']))
        # put model in evaluation mode
model.eval()
for batch_i, batch in tqdm(enumerate(data_loader),total=len(data_loader)):
if model_i == 0: wids[batch_i*config['valid_batch_size']:(batch_i+1)*config['valid_batch_size'],:batch['wids'].shape[1]] = batch['wids'].numpy()
# MOVE BATCH TO GPU AND INFER
ids = batch["input_ids"].to(config['device'])
mask = batch["attention_mask"].to(config['device'])
with torch.no_grad():
with amp.autocast():
outputs, hidden_states = model(ids, attention_mask=mask,return_transformer_hidden_states=True)
all_preds = torch.nn.functional.softmax(outputs[0], dim=2).cpu().detach().numpy()
#all_preds/=2
ensemble_preds[batch_i*config['valid_batch_size']:(batch_i+1)*config['valid_batch_size'],:all_preds.shape[1]] += all_preds
del ids
del mask
del outputs
del all_preds
gc.collect()
torch.cuda.empty_cache()
ensemble_preds /= len(model_ids)
predictions = []
    # ITERATE THROUGH EACH TEXT AND GET PRED
for text_i in range(ensemble_preds.shape[0]):
token_preds = ensemble_preds[text_i]
prediction = []
previous_word_idx = -1
prob_buffer = []
word_ids = wids[text_i][wids[text_i] != -100]
for idx,word_idx in enumerate(word_ids):
if word_idx == -1:
pass
elif word_idx != previous_word_idx:
if prob_buffer:
prediction.append(np.mean(prob_buffer, dtype=np.float32, axis=0))
prob_buffer = []
prob_buffer.append(token_preds[idx])
previous_word_idx = word_idx
else:
prob_buffer.append(token_preds[idx])
prediction.append(np.mean(prob_buffer, dtype=np.float32, axis=0))
predictions.append(prediction)
gc.collect()
torch.cuda.empty_cache()
return predictions
# In[117]:
import pickle
valid = train_df.loc[train_df['id'].isin(IDS[valid_idx])]
print('Predicting with BigBird...')
if not SUBMISSION:
try:
with open( KAGGLE_CACHE + f"/valid_preds_fold{args.fold}.p", "rb" ) as validFile:
valid_word_preds = pickle.load( validFile )
print("preds loaded")
except:
valid_word_preds = inference(testing_loader, ENSEMBLE_IDS)
with open( cache + f"/valid_preds_fold{args.fold}.p", "wb+" ) as validFile:
pickle.dump( valid_word_preds, validFile )
else: valid_word_preds = []
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.GRU",
"torch.nn.LSTM",
"torch.cuda.amp.autocast",
"torch.cuda.is_available",
"torch.load",
"torch.nn.functional.pad",
"torch.nn.LayerNorm",
"torch.nn.Conv1d",
"torch.utils.data.DataLoader",
"torch.as_tensor",
"torch.cuda.empty_cache",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.no_grad",
"torch.nn.BatchNorm1d"
] | 1.10.2 | Shujun-He/3rd_Solution_Feedback_Prize_Evaluating_Student_Writing | 1a3d1041978ab27f7158505b3d1438676d15b7ca |
0.2 | from torch import nn
from modules.TimeDistributed import TimeDistributed
from modules.Utils import utils
class BiRNNEncoder(nn.Module):
def __init__(self, max_length, nr_hidden, dropout=0.0):
super(BiRNNEncoder, self).__init__()
self.nr_hidden = nr_hidden
self.fully_connected = nn.Sequential(
nn.Linear(nr_hidden * 2, nr_hidden),
nn.ReLU(),
nn.Dropout(dropout)
)
self.lstm = nn.LSTM(nr_hidden, nr_hidden, max_length,
dropout=dropout,
bidirectional=True,
batch_first=True)
self.fully_connected = TimeDistributed(self.fully_connected)
self.dropout = nn.Dropout(p=dropout)
utils.init_weights(self)
def forward(self, input):
output, _ = self.lstm(input)
return self.dropout(self.fully_connected(output))
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.ReLU"
] | 0.2 | apsdehal/nli-batch-optimizations | 946dbeae58edd39dcda948d03765f7b1070b4eab |
1.0 | r"""
metrics 模块实现了 fastNLP 所需的各种常用衡量指标,一般做为 :class:`~fastNLP.Trainer` 的参数使用。
"""
__all__ = [
"MetricBase",
"AccuracyMetric",
"SpanFPreRecMetric",
"CMRC2018Metric",
"ClassifyFPreRecMetric",
"ConfusionMatrixMetric"
]
import inspect
import warnings
from abc import abstractmethod
from collections import defaultdict
from typing import Union
from copy import deepcopy
import re
import numpy as np
import torch
from .utils import _CheckError
from .utils import _CheckRes
from .utils import _build_args
from .utils import _check_arg_dict_list
from .utils import _get_func_signature
from .utils import seq_len_to_mask
from .vocabulary import Vocabulary
from .utils import ConfusionMatrix
class MetricBase(object):
r"""
所有metrics的基类,所有的传入到Trainer, Tester的Metric需要继承自该对象,需要覆盖写入evaluate(), get_metric()方法。
evaluate(xxx)中传入的是一个batch的数据。
get_metric(xxx)当所有数据处理完毕,调用该方法得到最终的metric值
以分类问题中,Accuracy计算为例
假设model的forward返回dict中包含 `pred` 这个key, 并且该key需要用于Accuracy::
class Model(nn.Module):
def __init__(xxx):
# do something
def forward(self, xxx):
# do something
return {'pred': pred, 'other_keys':xxx} # pred's shape: batch_size x num_classes
假设dataset中 `label` 这个field是需要预测的值,并且该field被设置为了target
对应的AccMetric可以按如下的定义, version1, 只使用这一次::
class AccMetric(MetricBase):
def __init__(self):
super().__init__()
# 根据你的情况自定义指标
self.corr_num = 0
self.total = 0
def evaluate(self, label, pred): # 这里的名称需要和dataset中target field与model返回的key是一样的,不然找不到对应的value
# dev或test时,每个batch结束会调用一次该方法,需要实现如何根据每个batch累加metric
self.total += label.size(0)
self.corr_num += label.eq(pred).sum().item()
def get_metric(self, reset=True): # 在这里定义如何计算metric
acc = self.corr_num/self.total
if reset: # 是否清零以便重新计算
self.corr_num = 0
self.total = 0
return {'acc': acc} # 需要返回一个dict,key为该metric的名称,该名称会显示到Trainer的progress bar中
version2,如果需要复用Metric,比如下一次使用AccMetric时,dataset中目标field不叫label而叫y,或者model的输出不是pred::
class AccMetric(MetricBase):
def __init__(self, label=None, pred=None):
# 假设在另一场景使用时,目标field叫y,model给出的key为pred_y。则只需要在初始化AccMetric时,
# acc_metric = AccMetric(label='y', pred='pred_y')即可。
# 当初始化为acc_metric = AccMetric(),即label=None, pred=None, fastNLP会直接使用'label', 'pred'作为key去索取对
# 应的的值
super().__init__()
self._init_param_map(label=label, pred=pred) # 该方法会注册label和pred. 仅需要注册evaluate()方法会用到的参数名即可
# 如果没有注册该则效果与version1就是一样的
# 根据你的情况自定义指标
self.corr_num = 0
self.total = 0
def evaluate(self, label, pred): # 这里的参数名称需要和self._init_param_map()注册时一致。
# dev或test时,每个batch结束会调用一次该方法,需要实现如何根据每个batch累加metric
self.total += label.size(0)
self.corr_num += label.eq(pred).sum().item()
def get_metric(self, reset=True): # 在这里定义如何计算metric
acc = self.corr_num/self.total
if reset: # 是否清零以便重新计算
self.corr_num = 0
self.total = 0
return {'acc': acc} # 需要返回一个dict,key为该metric的名称,该名称会显示到Trainer的progress bar中
``MetricBase`` 将会在输入的字典 ``pred_dict`` 和 ``target_dict`` 中进行检查.
``pred_dict`` 是模型当中 ``forward()`` 函数或者 ``predict()`` 函数的返回值.
``target_dict`` 是DataSet当中的ground truth, 判定ground truth的条件是field的 ``is_target`` 被设置为True.
``MetricBase`` 会进行以下的类型检测:
1. self.evaluate当中是否有varargs, 这是不支持的.
2. self.evaluate当中所需要的参数是否既不在 ``pred_dict`` 也不在 ``target_dict`` .
3. self.evaluate当中所需要的参数是否既在 ``pred_dict`` 也在 ``target_dict`` .
除此以外,在参数被传入self.evaluate以前,这个函数会检测 ``pred_dict`` 和 ``target_dict`` 当中没有被用到的参数
如果kwargs是self.evaluate的参数,则不会检测
self.evaluate将计算一个批次(batch)的评价指标,并累计。 没有返回值
self.get_metric将统计当前的评价指标并返回评价结果, 返回值需要是一个dict, key是指标名称,value是指标的值
"""
def __init__(self):
self._param_map = {} # key is param in function, value is input param.
self._checked = False
self._metric_name = self.__class__.__name__
@property
def param_map(self):
        if len(self._param_map) == 0:  # if empty, the map has not been initialized yet
func_spect = inspect.getfullargspec(self.evaluate)
func_args = [arg for arg in func_spect.args if arg != 'self']
for arg in func_args:
self._param_map[arg] = arg
return self._param_map
@abstractmethod
def evaluate(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def get_metric(self, reset=True):
        raise NotImplementedError
def set_metric_name(self, name: str):
r"""
设置metric的名称,默认是Metric的class name.
:param str name:
:return: self
"""
self._metric_name = name
return self
def get_metric_name(self):
r"""
返回metric的名称
:return:
"""
return self._metric_name
def _init_param_map(self, key_map=None, **kwargs):
r"""检查key_map和其他参数map,并将这些映射关系添加到self._param_map
:param dict key_map: 表示key的映射关系
:param kwargs: key word args里面的每一个的键-值对都会被构造成映射关系
:return: None
"""
value_counter = defaultdict(set)
if key_map is not None:
if not isinstance(key_map, dict):
raise TypeError("key_map must be `dict`, got {}.".format(type(key_map)))
for key, value in key_map.items():
if value is None:
self._param_map[key] = key
continue
if not isinstance(key, str):
raise TypeError(f"key in key_map must be `str`, not `{type(key)}`.")
if not isinstance(value, str):
raise TypeError(f"value in key_map must be `str`, not `{type(value)}`.")
self._param_map[key] = value
value_counter[value].add(key)
for key, value in kwargs.items():
if value is None:
self._param_map[key] = key
continue
if not isinstance(value, str):
raise TypeError(f"in {key}={value}, value must be `str`, not `{type(value)}`.")
self._param_map[key] = value
value_counter[value].add(key)
for value, key_set in value_counter.items():
if len(key_set) > 1:
raise ValueError(f"Several parameters:{key_set} are provided with one output {value}.")
        # check consistency between signature and _param_map
func_spect = inspect.getfullargspec(self.evaluate)
func_args = [arg for arg in func_spect.args if arg != 'self']
for func_param, input_param in self._param_map.items():
if func_param not in func_args:
raise NameError(
f"Parameter `{func_param}` is not in {_get_func_signature(self.evaluate)}. Please check the "
f"initialization parameters, or change its signature.")
def __call__(self, pred_dict, target_dict):
r"""
这个方法会调用self.evaluate 方法.
在调用之前,会进行以下检测:
1. self.evaluate当中是否有varargs, 这是不支持的.
2. self.evaluate当中所需要的参数是否既不在``pred_dict``也不在``target_dict``.
3. self.evaluate当中所需要的参数是否既在``pred_dict``也在``target_dict``.
除此以外,在参数被传入self.evaluate以前,这个函数会检测``pred_dict``和``target_dict``当中没有被用到的参数
如果kwargs是self.evaluate的参数,则不会检测
:param pred_dict: 模型的forward函数或者predict函数返回的dict
:param target_dict: DataSet.batch_y里的键-值对所组成的dict(即is_target=True的fields的内容)
:return:
"""
if not self._checked:
if not callable(self.evaluate):
raise TypeError(f"{self.__class__.__name__}.evaluate has to be callable, not {type(self.evaluate)}.")
            # 1. check consistency between signature and _param_map
func_spect = inspect.getfullargspec(self.evaluate)
func_args = set([arg for arg in func_spect.args if arg != 'self'])
for func_arg, input_arg in self._param_map.items():
if func_arg not in func_args:
raise NameError(f"`{func_arg}` not in {_get_func_signature(self.evaluate)}.")
# 2. only part of the _param_map are passed, left are not
for arg in func_args:
if arg not in self._param_map:
self._param_map[arg] = arg # This param does not need mapping.
self._evaluate_args = func_args
self._reverse_param_map = {input_arg: func_arg for func_arg, input_arg in self._param_map.items()}
# need to wrap inputs in dict.
mapped_pred_dict = {}
mapped_target_dict = {}
for input_arg, mapped_arg in self._reverse_param_map.items():
if input_arg in pred_dict:
mapped_pred_dict[mapped_arg] = pred_dict[input_arg]
if input_arg in target_dict:
mapped_target_dict[mapped_arg] = target_dict[input_arg]
# missing
if not self._checked:
duplicated = []
for input_arg, mapped_arg in self._reverse_param_map.items():
if input_arg in pred_dict and input_arg in target_dict:
duplicated.append(input_arg)
check_res = _check_arg_dict_list(self.evaluate, [mapped_pred_dict, mapped_target_dict])
# only check missing.
# replace missing.
missing = check_res.missing
replaced_missing = list(missing)
for idx, func_arg in enumerate(missing):
# Don't delete `` in this information, nor add ``
replaced_missing[idx] = f"{self._param_map[func_arg]}" + f"(assign to `{func_arg}` " \
f"in `{self.__class__.__name__}`)"
check_res = _CheckRes(missing=replaced_missing,
unused=check_res.unused,
duplicated=duplicated,
required=check_res.required,
all_needed=check_res.all_needed,
varargs=check_res.varargs)
if check_res.missing or check_res.duplicated:
raise _CheckError(check_res=check_res,
func_signature=_get_func_signature(self.evaluate))
self._checked = True
refined_args = _build_args(self.evaluate, **mapped_pred_dict, **mapped_target_dict)
self.evaluate(**refined_args)
return
class ConfusionMatrixMetric(MetricBase):
r"""
分类问题计算混淆矩阵的Metric(其它的Metric参见 :mod:`fastNLP.core.metrics` )
最后返回结果为::
dict,{'confusion_matrix': ConfusionMatrix实例}
ConfusionMatrix实例的print()函数将输出矩阵字符串。
.. code ::
pred_dict = {"pred": torch.Tensor([2,1,3])}
target_dict = {'target': torch.Tensor([2,2,1])}
metric = ConfusionMatrixMetric()
metric(pred_dict=pred_dict, target_dict=target_dict, )
print(metric.get_metric())
.. code ::
{'confusion_matrix':
target 1.0 2.0 3.0 all
pred
1.0 0 1 0 1
2.0 0 1 0 1
3.0 1 0 0 1
all 1 2 0 3
}
"""
def __init__(self,
vocab=None,
pred=None,
target=None,
seq_len=None,
print_ratio=False
):
r"""
:param vocab: vocab词表类,要求有to_word()方法。
:param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred`
:param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target`
:param seq_len: 参数映射表中 `seq_len` 的映射关系,None表示映射关系为 `seq_len` -> `seq_len`
:param print_ratio: 限制print的输出,false only for result, true for result, percent(dim=0), percent(dim = 1)
"""
super().__init__()
self._init_param_map(pred=pred, target=target, seq_len=seq_len)
self.confusion_matrix = ConfusionMatrix(
vocab=vocab,
print_ratio=print_ratio,
)
def evaluate(self, pred, target, seq_len=None):
r"""
evaluate函数将针对一个批次的预测结果做评价指标的累计
:param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]),
torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes])
:param torch.Tensor target: 真实值的tensor, tensor的形状可以是Element's can be: torch.Size([B,]),
torch.Size([B,]), torch.Size([B, max_len]), 或者torch.Size([B, max_len])
:param torch.Tensor seq_len: 序列长度标记, 标记的形状可以是None, torch.Size([B]), 或者torch.Size([B]).
"""
if not isinstance(pred, torch.Tensor):
raise TypeError(
f"`pred` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(pred)}.")
if not isinstance(target, torch.Tensor):
raise TypeError(
f"`target` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(target)}.")
if seq_len is not None and not isinstance(seq_len, torch.Tensor):
raise TypeError(
f"`seq_lens` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(seq_len)}.")
if pred.dim() == target.dim():
pass
elif pred.dim() == target.dim() + 1:
pred = pred.argmax(dim=-1)
if seq_len is None and target.dim() > 1:
warnings.warn("You are not passing `seq_len` to exclude pad.")
else:
raise RuntimeError(
f"In {_get_func_signature(self.evaluate)}, when pred have "
f"size:{pred.size()}, target should have size: {pred.size()} or "
f"{pred.size()[:-1]}, got {target.size()}.")
target = target.to(pred)
if seq_len is not None and target.dim() > 1:
for p, t, l in zip(pred.tolist(), target.tolist(),
seq_len.tolist()):
l = int(l)
self.confusion_matrix.add_pred_target(p[:l], t[:l])
        elif target.dim() > 1:  # for a multi-dimensional target without seq_len, use the full length
for p, t in zip(pred.tolist(), target.tolist()):
self.confusion_matrix.add_pred_target(p, t)
else:
self.confusion_matrix.add_pred_target(pred.tolist(),
target.tolist())
def get_metric(self, reset=True):
r"""
get_metric函数将根据evaluate函数累计的评价指标统计量来计算最终的评价结果.
:param bool reset: 在调用完get_metric后是否清空评价指标统计量.
:return dict evaluate_result: {"confusion_matrix": ConfusionMatrix}
"""
confusion = {'confusion_matrix': deepcopy(self.confusion_matrix)}
if reset:
self.confusion_matrix.clear()
return confusion
class AccuracyMetric(MetricBase):
r"""
准确率Metric(其它的Metric参见 :mod:`fastNLP.core.metrics` )
"""
def __init__(self, pred=None, target=None, seq_len=None):
r"""
:param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred`
:param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target`
:param seq_len: 参数映射表中 `seq_len` 的映射关系,None表示映射关系为 `seq_len` -> `seq_len`
"""
super().__init__()
self._init_param_map(pred=pred, target=target, seq_len=seq_len)
self.total = 0
self.acc_count = 0
def evaluate(self, pred, target, seq_len=None):
r"""
evaluate函数将针对一个批次的预测结果做评价指标的累计
:param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]),
torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes])
:param torch.Tensor target: 真实值的tensor, tensor的形状可以是Element's can be: torch.Size([B,]),
torch.Size([B,]), torch.Size([B, max_len]), 或者torch.Size([B, max_len])
:param torch.Tensor seq_len: 序列长度标记, 标记的形状可以是None, None, torch.Size([B]), 或者torch.Size([B]).
如果mask也被传进来的话seq_len会被忽略.
"""
# TODO 这里报错需要更改,因为pred是啥用户并不知道。需要告知用户真实的value
if not isinstance(pred, torch.Tensor):
raise TypeError(f"`pred` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(pred)}.")
if not isinstance(target, torch.Tensor):
raise TypeError(f"`target` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(target)}.")
if seq_len is not None and not isinstance(seq_len, torch.Tensor):
raise TypeError(f"`seq_lens` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(seq_len)}.")
if seq_len is not None and target.dim() > 1:
max_len = target.size(1)
masks = seq_len_to_mask(seq_len=seq_len, max_len=max_len)
else:
masks = None
if pred.dim() == target.dim():
pass
elif pred.dim() == target.dim() + 1:
pred = pred.argmax(dim=-1)
if seq_len is None and target.dim() > 1:
warnings.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.")
else:
raise RuntimeError(f"In {_get_func_signature(self.evaluate)}, when pred have "
f"size:{pred.size()}, target should have size: {pred.size()} or "
f"{pred.size()[:-1]}, got {target.size()}.")
target = target.to(pred)
if masks is not None:
self.acc_count += torch.sum(torch.eq(pred, target).masked_fill(masks.eq(False), 0)).item()
self.total += torch.sum(masks).item()
else:
self.acc_count += torch.sum(torch.eq(pred, target)).item()
self.total += np.prod(list(pred.size()))
def get_metric(self, reset=True):
r"""
get_metric函数将根据evaluate函数累计的评价指标统计量来计算最终的评价结果.
:param bool reset: 在调用完get_metric后是否清空评价指标统计量.
:return dict evaluate_result: {"acc": float}
"""
evaluate_result = {'acc': round(float(self.acc_count) / (self.total + 1e-12), 6)}
if reset:
self.acc_count = 0
self.total = 0
return evaluate_result
class ClassifyFPreRecMetric(MetricBase):
r"""
分类问题计算FPR值的Metric(其它的Metric参见 :mod:`fastNLP.core.metrics` )
最后得到的metric结果为::
{
'f': xxx, # 这里使用f考虑以后可以计算f_beta值
'pre': xxx,
'rec':xxx
}
若only_gross=False, 即还会返回各个label的metric统计值::
{
'f': xxx,
'pre': xxx,
'rec':xxx,
'f-label': xxx,
'pre-label': xxx,
'rec-label':xxx,
...
}
"""
def __init__(self, tag_vocab=None, pred=None, target=None, seq_len=None, ignore_labels=None,
only_gross=True, f_type='micro', beta=1):
r"""
:param tag_vocab: 标签的 :class:`~fastNLP.Vocabulary` . 默认值为None。若为None则使用数字来作为标签内容,否则使用vocab来作为标签内容。
:param str pred: 用该key在evaluate()时从传入dict中取出prediction数据。 为None,则使用 `pred` 取数据
:param str target: 用该key在evaluate()时从传入dict中取出target数据。 为None,则使用 `target` 取数据
:param str seq_len: 用该key在evaluate()时从传入dict中取出sequence length数据。为None,则使用 `seq_len` 取数据。
:param list ignore_labels: str 组成的list. 这个list中的class不会被用于计算。例如在POS tagging时传入['NN'],则不会计算'NN'个label
:param bool only_gross: 是否只计算总的f1, precision, recall的值;如果为False,不仅返回总的f1, pre, rec, 还会返回每个label的f1, pre, rec
:param str f_type: `micro` 或 `macro` . `micro` :通过先计算总体的TP,FN和FP的数量,再计算f, precision, recall; `macro` : 分布计算每个类别的f, precision, recall,然后做平均(各类别f的权重相同)
:param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . 常用为 `beta=0.5, 1, 2` 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。
"""
if tag_vocab:
if not isinstance(tag_vocab, Vocabulary):
raise TypeError("tag_vocab can only be fastNLP.Vocabulary, not {}.".format(type(tag_vocab)))
if f_type not in ('micro', 'macro'):
raise ValueError("f_type only supports `micro` or `macro`', got {}.".format(f_type))
self.ignore_labels = ignore_labels
self.f_type = f_type
self.beta = beta
self.beta_square = self.beta ** 2
self.only_gross = only_gross
super().__init__()
self._init_param_map(pred=pred, target=target, seq_len=seq_len)
self.tag_vocab = tag_vocab
self._tp, self._fp, self._fn = defaultdict(int), defaultdict(int), defaultdict(int)
# tp: truth=T, classify=T; fp: truth=T, classify=F; fn: truth=F, classify=T
def evaluate(self, pred, target, seq_len=None):
r"""
evaluate函数将针对一个批次的预测结果做评价指标的累计
:param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]),
torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes])
:param torch.Tensor target: 真实值的tensor, tensor的形状可以是Element's can be: torch.Size([B,]),
torch.Size([B,]), torch.Size([B, max_len]), 或者torch.Size([B, max_len])
:param torch.Tensor seq_len: 序列长度标记, 标记的形状可以是None, None, torch.Size([B]), 或者torch.Size([B]).
如果mask也被传进来的话seq_len会被忽略.
"""
# TODO 这里报错需要更改,因为pred是啥用户并不知道。需要告知用户真实的value
if not isinstance(pred, torch.Tensor):
raise TypeError(f"`pred` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(pred)}.")
if not isinstance(target, torch.Tensor):
raise TypeError(f"`target` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(target)}.")
if seq_len is not None and not isinstance(seq_len, torch.Tensor):
raise TypeError(f"`seq_lens` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(seq_len)}.")
if seq_len is not None and target.dim() > 1:
max_len = target.size(1)
masks = seq_len_to_mask(seq_len=seq_len, max_len=max_len)
else:
masks = torch.ones_like(target).long().to(target.device)
masks = masks.eq(False)
if pred.dim() == target.dim():
pass
elif pred.dim() == target.dim() + 1:
pred = pred.argmax(dim=-1)
if seq_len is None and target.dim() > 1:
warnings.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.")
else:
raise RuntimeError(f"In {_get_func_signature(self.evaluate)}, when pred have "
f"size:{pred.size()}, target should have size: {pred.size()} or "
f"{pred.size()[:-1]}, got {target.size()}.")
target_idxes = set(target.reshape(-1).tolist())
target = target.to(pred)
for target_idx in target_idxes:
self._tp[target_idx] += torch.sum((pred == target_idx).long().masked_fill(target != target_idx, 0).masked_fill(masks, 0)).item()
self._fp[target_idx] += torch.sum((pred != target_idx).long().masked_fill(target != target_idx, 0).masked_fill(masks, 0)).item()
self._fn[target_idx] += torch.sum((pred == target_idx).long().masked_fill(target == target_idx, 0).masked_fill(masks, 0)).item()
def get_metric(self, reset=True):
r"""
get_metric函数将根据evaluate函数累计的评价指标统计量来计算最终的评价结果.
:param bool reset: 在调用完get_metric后是否清空评价指标统计量.
:return dict evaluate_result: {"acc": float}
"""
evaluate_result = {}
if not self.only_gross or self.f_type == 'macro':
tags = set(self._fn.keys())
tags.update(set(self._fp.keys()))
tags.update(set(self._tp.keys()))
f_sum = 0
pre_sum = 0
rec_sum = 0
for tag in tags:
if self.tag_vocab is not None:
tag_name = self.tag_vocab.to_word(tag)
else:
tag_name = int(tag)
tp = self._tp[tag]
fn = self._fn[tag]
fp = self._fp[tag]
f, pre, rec = _compute_f_pre_rec(self.beta_square, tp, fn, fp)
f_sum += f
pre_sum += pre
rec_sum += rec
                if not self.only_gross and tag != '':  # tag != '' guards against the case where there is no tag
f_key = 'f-{}'.format(tag_name)
pre_key = 'pre-{}'.format(tag_name)
rec_key = 'rec-{}'.format(tag_name)
evaluate_result[f_key] = f
evaluate_result[pre_key] = pre
evaluate_result[rec_key] = rec
if self.f_type == 'macro':
evaluate_result['f'] = f_sum / len(tags)
evaluate_result['pre'] = pre_sum / len(tags)
evaluate_result['rec'] = rec_sum / len(tags)
if self.f_type == 'micro':
f, pre, rec = _compute_f_pre_rec(self.beta_square,
sum(self._tp.values()),
sum(self._fn.values()),
sum(self._fp.values()))
evaluate_result['f'] = f
evaluate_result['pre'] = pre
evaluate_result['rec'] = rec
if reset:
self._tp = defaultdict(int)
self._fp = defaultdict(int)
self._fn = defaultdict(int)
for key, value in evaluate_result.items():
evaluate_result[key] = round(value, 6)
return evaluate_result
def _bmes_tag_to_spans(tags, ignore_labels=None):
r"""
给定一个tags的lis,比如['S-song', 'B-singer', 'M-singer', 'E-singer', 'S-moive', 'S-actor']。
返回[('song', (0, 1)), ('singer', (1, 4)), ('moive', (4, 5)), ('actor', (5, 6))] (左闭右开区间)
也可以是单纯的['S', 'B', 'M', 'E', 'B', 'M', 'M',...]序列
:param tags: List[str],
:param ignore_labels: List[str], 在该list中的label将被忽略
:return: List[Tuple[str, List[int, int]]]. [(label,[start, end])]
"""
ignore_labels = set(ignore_labels) if ignore_labels else set()
spans = []
prev_bmes_tag = None
for idx, tag in enumerate(tags):
tag = tag.lower()
bmes_tag, label = tag[:1], tag[2:]
if bmes_tag in ('b', 's'):
spans.append((label, [idx, idx]))
elif bmes_tag in ('m', 'e') and prev_bmes_tag in ('b', 'm') and label == spans[-1][0]:
spans[-1][1][1] = idx
else:
spans.append((label, [idx, idx]))
prev_bmes_tag = bmes_tag
return [(span[0], (span[1][0], span[1][1] + 1))
for span in spans
if span[0] not in ignore_labels
]
def _bmeso_tag_to_spans(tags, ignore_labels=None):
r"""
给定一个tags的lis,比如['O', 'B-singer', 'M-singer', 'E-singer', 'O', 'O']。
返回[('singer', (1, 4))] (左闭右开区间)
:param tags: List[str],
:param ignore_labels: List[str], 在该list中的label将被忽略
:return: List[Tuple[str, List[int, int]]]. [(label,[start, end])]
"""
ignore_labels = set(ignore_labels) if ignore_labels else set()
spans = []
prev_bmes_tag = None
for idx, tag in enumerate(tags):
tag = tag.lower()
bmes_tag, label = tag[:1], tag[2:]
if bmes_tag in ('b', 's'):
spans.append((label, [idx, idx]))
elif bmes_tag in ('m', 'e') and prev_bmes_tag in ('b', 'm') and label == spans[-1][0]:
spans[-1][1][1] = idx
elif bmes_tag == 'o':
pass
else:
spans.append((label, [idx, idx]))
prev_bmes_tag = bmes_tag
return [(span[0], (span[1][0], span[1][1] + 1))
for span in spans
if span[0] not in ignore_labels
]
def _bioes_tag_to_spans(tags, ignore_labels=None):
r"""
给定一个tags的lis,比如['O', 'B-singer', 'I-singer', 'E-singer', 'O', 'O']。
返回[('singer', (1, 4))] (左闭右开区间)
:param tags: List[str],
:param ignore_labels: List[str], 在该list中的label将被忽略
:return: List[Tuple[str, List[int, int]]]. [(label,[start, end])]
"""
ignore_labels = set(ignore_labels) if ignore_labels else set()
spans = []
prev_bioes_tag = None
for idx, tag in enumerate(tags):
tag = tag.lower()
bioes_tag, label = tag[:1], tag[2:]
if bioes_tag in ('b', 's'):
spans.append((label, [idx, idx]))
elif bioes_tag in ('i', 'e') and prev_bioes_tag in ('b', 'i') and label == spans[-1][0]:
spans[-1][1][1] = idx
elif bioes_tag == 'o':
pass
else:
spans.append((label, [idx, idx]))
prev_bioes_tag = bioes_tag
return [(span[0], (span[1][0], span[1][1] + 1))
for span in spans
if span[0] not in ignore_labels
]
def _bio_tag_to_spans(tags, ignore_labels=None):
r"""
给定一个tags的lis,比如['O', 'B-singer', 'I-singer', 'I-singer', 'O', 'O']。
返回[('singer', (1, 4))] (左闭右开区间)
:param tags: List[str],
:param ignore_labels: List[str], 在该list中的label将被忽略
:return: List[Tuple[str, List[int, int]]]. [(label,[start, end])]
"""
ignore_labels = set(ignore_labels) if ignore_labels else set()
spans = []
prev_bio_tag = None
for idx, tag in enumerate(tags):
tag = tag.lower()
bio_tag, label = tag[:1], tag[2:]
if bio_tag == 'b':
spans.append((label, [idx, idx]))
elif bio_tag == 'i' and prev_bio_tag in ('b', 'i') and label == spans[-1][0]:
spans[-1][1][1] = idx
elif bio_tag == 'o': # o tag does not count
pass
else:
spans.append((label, [idx, idx]))
prev_bio_tag = bio_tag
return [(span[0], (span[1][0], span[1][1] + 1)) for span in spans if span[0] not in ignore_labels]
def _get_encoding_type_from_tag_vocab(tag_vocab: Union[Vocabulary, dict]) -> str:
r"""
给定Vocabulary自动判断是哪种类型的encoding, 支持判断bmes, bioes, bmeso, bio
:param tag_vocab: 支持传入tag Vocabulary; 或者传入形如{0:"O", 1:"B-tag1"},即index在前,tag在后的dict。
:return:
"""
tag_set = set()
unk_token = '<unk>'
pad_token = '<pad>'
if isinstance(tag_vocab, Vocabulary):
unk_token = tag_vocab.unknown
pad_token = tag_vocab.padding
tag_vocab = tag_vocab.idx2word
for idx, tag in tag_vocab.items():
if tag in (unk_token, pad_token):
continue
tag = tag[:1].lower()
tag_set.add(tag)
bmes_tag_set = set('bmes')
if tag_set == bmes_tag_set:
return 'bmes'
bio_tag_set = set('bio')
if tag_set == bio_tag_set:
return 'bio'
bmeso_tag_set = set('bmeso')
if tag_set == bmeso_tag_set:
return 'bmeso'
bioes_tag_set = set('bioes')
if tag_set == bioes_tag_set:
return 'bioes'
raise RuntimeError("encoding_type cannot be inferred automatically. Only support "
"'bio', 'bmes', 'bmeso', 'bioes' type.")
def _check_tag_vocab_and_encoding_type(tag_vocab: Union[Vocabulary, dict], encoding_type: str):
r"""
检查vocab中的tag是否与encoding_type是匹配的
:param tag_vocab: 支持传入tag Vocabulary; 或者传入形如{0:"O", 1:"B-tag1"},即index在前,tag在后的dict。
:param encoding_type: bio, bmes, bioes, bmeso
:return:
"""
tag_set = set()
unk_token = '<unk>'
pad_token = '<pad>'
if isinstance(tag_vocab, Vocabulary):
unk_token = tag_vocab.unknown
pad_token = tag_vocab.padding
tag_vocab = tag_vocab.idx2word
for idx, tag in tag_vocab.items():
if tag in (unk_token, pad_token):
continue
tag = tag[:1].lower()
tag_set.add(tag)
tags = encoding_type
for tag in tag_set:
assert tag in tags, f"{tag} is not a valid tag in encoding type:{encoding_type}. Please check your " \
f"encoding_type."
        tags = tags.replace(tag, '')  # remove this tag
    if tags:  # if not empty, some tags of the encoding type never appear in the vocabulary
warnings.warn(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. Check your "
"encoding_type.")
class SpanFPreRecMetric(MetricBase):
r"""
在序列标注问题中,以span的方式计算F, pre, rec.
比如中文Part of speech中,会以character的方式进行标注,句子 `中国在亚洲` 对应的POS可能为(以BMES为例)
['B-NN', 'E-NN', 'S-DET', 'B-NN', 'E-NN']。该metric就是为类似情况下的F1计算。
最后得到的metric结果为::
{
'f': xxx, # 这里使用f考虑以后可以计算f_beta值
'pre': xxx,
'rec':xxx
}
若only_gross=False, 即还会返回各个label的metric统计值::
{
'f': xxx,
'pre': xxx,
'rec':xxx,
'f-label': xxx,
'pre-label': xxx,
'rec-label':xxx,
...
}
"""
def __init__(self, tag_vocab, pred=None, target=None, seq_len=None, encoding_type=None, ignore_labels=None,
only_gross=True, f_type='micro', beta=1):
r"""
:param tag_vocab: 标签的 :class:`~fastNLP.Vocabulary` 。支持的标签为"B"(没有label);或"B-xxx"(xxx为某种label,比如POS中的NN),
在解码时,会将相同xxx的认为是同一个label,比如['B-NN', 'E-NN']会被合并为一个'NN'.
:param str pred: 用该key在evaluate()时从传入dict中取出prediction数据。 为None,则使用 `pred` 取数据
:param str target: 用该key在evaluate()时从传入dict中取出target数据。 为None,则使用 `target` 取数据
:param str seq_len: 用该key在evaluate()时从传入dict中取出sequence length数据。为None,则使用 `seq_len` 取数据。
:param str encoding_type: 目前支持bio, bmes, bmeso, bioes。默认为None,通过tag_vocab自动判断.
:param list ignore_labels: str 组成的list. 这个list中的class不会被用于计算。例如在POS tagging时传入['NN'],则不会计算'NN'个label
:param bool only_gross: 是否只计算总的f1, precision, recall的值;如果为False,不仅返回总的f1, pre, rec, 还会返回每个label的f1, pre, rec
:param str f_type: `micro` 或 `macro` . `micro` :通过先计算总体的TP,FN和FP的数量,再计算f, precision, recall; `macro` : 分布计算每个类别的f, precision, recall,然后做平均(各类别f的权重相同)
:param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . 常用为 `beta=0.5, 1, 2` 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。
"""
if not isinstance(tag_vocab, Vocabulary):
raise TypeError("tag_vocab can only be fastNLP.Vocabulary, not {}.".format(type(tag_vocab)))
if f_type not in ('micro', 'macro'):
raise ValueError("f_type only supports `micro` or `macro`', got {}.".format(f_type))
if encoding_type:
encoding_type = encoding_type.lower()
_check_tag_vocab_and_encoding_type(tag_vocab, encoding_type)
self.encoding_type = encoding_type
else:
self.encoding_type = _get_encoding_type_from_tag_vocab(tag_vocab)
if self.encoding_type == 'bmes':
self.tag_to_span_func = _bmes_tag_to_spans
elif self.encoding_type == 'bio':
self.tag_to_span_func = _bio_tag_to_spans
elif self.encoding_type == 'bmeso':
self.tag_to_span_func = _bmeso_tag_to_spans
elif self.encoding_type == 'bioes':
self.tag_to_span_func = _bioes_tag_to_spans
else:
raise ValueError("Only support 'bio', 'bmes', 'bmeso', 'bioes' type.")
self.ignore_labels = ignore_labels
self.f_type = f_type
self.beta = beta
self.beta_square = self.beta ** 2
self.only_gross = only_gross
super().__init__()
self._init_param_map(pred=pred, target=target, seq_len=seq_len)
self.tag_vocab = tag_vocab
self._true_positives = defaultdict(int)
self._false_positives = defaultdict(int)
self._false_negatives = defaultdict(int)
def evaluate(self, pred, target, seq_len):
r"""evaluate函数将针对一个批次的预测结果做评价指标的累计
:param pred: [batch, seq_len] 或者 [batch, seq_len, len(tag_vocab)], 预测的结果
:param target: [batch, seq_len], 真实值
:param seq_len: [batch] 文本长度标记
:return:
"""
if not isinstance(pred, torch.Tensor):
raise TypeError(f"`pred` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(pred)}.")
if not isinstance(target, torch.Tensor):
raise TypeError(f"`target` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(target)}.")
if not isinstance(seq_len, torch.Tensor):
raise TypeError(f"`seq_lens` in {_get_func_signature(self.evaluate)} must be torch.Tensor,"
f"got {type(seq_len)}.")
if pred.size() == target.size() and len(target.size()) == 2:
pass
elif len(pred.size()) == len(target.size()) + 1 and len(target.size()) == 2:
num_classes = pred.size(-1)
pred = pred.argmax(dim=-1)
if (target >= num_classes).any():
raise ValueError("A gold label passed to SpanBasedF1Metric contains an "
"id >= {}, the number of classes.".format(num_classes))
else:
raise RuntimeError(f"In {_get_func_signature(self.evaluate)}, when pred have "
f"size:{pred.size()}, target should have size: {pred.size()} or "
f"{pred.size()[:-1]}, got {target.size()}.")
batch_size = pred.size(0)
pred = pred.tolist()
target = target.tolist()
for i in range(batch_size):
pred_tags = pred[i][:int(seq_len[i])]
gold_tags = target[i][:int(seq_len[i])]
pred_str_tags = [self.tag_vocab.to_word(tag) for tag in pred_tags]
gold_str_tags = [self.tag_vocab.to_word(tag) for tag in gold_tags]
pred_spans = self.tag_to_span_func(pred_str_tags, ignore_labels=self.ignore_labels)
gold_spans = self.tag_to_span_func(gold_str_tags, ignore_labels=self.ignore_labels)
for span in pred_spans:
if span in gold_spans:
self._true_positives[span[0]] += 1
gold_spans.remove(span)
else:
self._false_positives[span[0]] += 1
for span in gold_spans:
self._false_negatives[span[0]] += 1
def get_metric(self, reset=True):
r"""get_metric函数将根据evaluate函数累计的评价指标统计量来计算最终的评价结果."""
evaluate_result = {}
if not self.only_gross or self.f_type == 'macro':
tags = set(self._false_negatives.keys())
tags.update(set(self._false_positives.keys()))
tags.update(set(self._true_positives.keys()))
f_sum = 0
pre_sum = 0
rec_sum = 0
for tag in tags:
tp = self._true_positives[tag]
fn = self._false_negatives[tag]
fp = self._false_positives[tag]
f, pre, rec = _compute_f_pre_rec(self.beta_square, tp, fn, fp)
f_sum += f
pre_sum += pre
rec_sum += rec
                if not self.only_gross and tag != '':  # tag != '' guards against the case where there is no tag
f_key = 'f-{}'.format(tag)
pre_key = 'pre-{}'.format(tag)
rec_key = 'rec-{}'.format(tag)
evaluate_result[f_key] = f
evaluate_result[pre_key] = pre
evaluate_result[rec_key] = rec
if self.f_type == 'macro':
evaluate_result['f'] = f_sum / len(tags)
evaluate_result['pre'] = pre_sum / len(tags)
evaluate_result['rec'] = rec_sum / len(tags)
if self.f_type == 'micro':
f, pre, rec = _compute_f_pre_rec(self.beta_square,
sum(self._true_positives.values()),
sum(self._false_negatives.values()),
sum(self._false_positives.values()))
evaluate_result['f'] = f
evaluate_result['pre'] = pre
evaluate_result['rec'] = rec
if reset:
self._true_positives = defaultdict(int)
self._false_positives = defaultdict(int)
self._false_negatives = defaultdict(int)
for key, value in evaluate_result.items():
evaluate_result[key] = round(value, 6)
return evaluate_result
def _compute_f_pre_rec(beta_square, tp, fn, fp):
r"""
:param tp: int, true positive
:param fn: int, false negative
:param fp: int, false positive
:return: (f, pre, rec)
"""
pre = tp / (fp + tp + 1e-13)
rec = tp / (fn + tp + 1e-13)
f = (1 + beta_square) * pre * rec / (beta_square * pre + rec + 1e-13)
return f, pre, rec
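# Worked example (illustrative only, not part of the original module): with
# beta_square=1 (the plain F1 score), tp=8, fn=2 and fp=4 the helper gives
# pre = 8 / (4 + 8) ~= 0.667, rec = 8 / (2 + 8) = 0.8 and
# f = 2 * pre * rec / (pre + rec) ~= 0.727 -- the same formula get_metric above
# applies per tag ('macro') or to the summed counts ('micro').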
def _prepare_metrics(metrics):
r"""
Prepare list of Metric based on input
:param metrics:
:return: List[fastNLP.MetricBase]
"""
_metrics = []
if metrics:
if isinstance(metrics, list):
for metric in metrics:
if isinstance(metric, type):
metric = metric()
if isinstance(metric, MetricBase):
metric_name = metric.__class__.__name__
if not callable(metric.evaluate):
raise TypeError(f"{metric_name}.evaluate must be callable, got {type(metric.evaluate)}.")
if not callable(metric.get_metric):
raise TypeError(f"{metric_name}.get_metric must be callable, got {type(metric.get_metric)}.")
_metrics.append(metric)
else:
raise TypeError(
f"The type of metric in metrics must be `fastNLP.MetricBase`, not `{type(metric)}`.")
elif isinstance(metrics, MetricBase):
_metrics = [metrics]
else:
raise TypeError(f"The type of metrics should be `list[fastNLP.MetricBase]` or `fastNLP.MetricBase`, "
f"got {type(metrics)}.")
return _metrics
def _accuracy_topk(y_true, y_prob, k=1):
r"""Compute accuracy of y_true matching top-k probable labels in y_prob.
:param y_true: ndarray, true label, [n_samples]
:param y_prob: ndarray, label probabilities, [n_samples, n_classes]
:param k: int, k in top-k
:returns acc: accuracy of top-k
"""
y_pred_topk = np.argsort(y_prob, axis=-1)[:, -1:-k - 1:-1]
y_true_tile = np.tile(np.expand_dims(y_true, axis=1), (1, k))
y_match = np.any(y_pred_topk == y_true_tile, axis=-1)
acc = np.sum(y_match) / y_match.shape[0]
return acc
def _pred_topk(y_prob, k=1):
r"""Return top-k predicted labels and corresponding probabilities.
:param y_prob: ndarray, size [n_samples, n_classes], probabilities on labels
:param k: int, k of top-k
:returns (y_pred_topk, y_prob_topk):
y_pred_topk: ndarray, size [n_samples, k], predicted top-k labels
y_prob_topk: ndarray, size [n_samples, k], probabilities for top-k labels
"""
y_pred_topk = np.argsort(y_prob, axis=-1)[:, -1:-k - 1:-1]
x_axis_index = np.tile(
np.arange(len(y_prob))[:, np.newaxis],
(1, k))
y_prob_topk = y_prob[x_axis_index, y_pred_topk]
return y_pred_topk, y_prob_topk
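# Quick illustration (assumed inputs, not from the original file): with
# y_prob = np.array([[0.1, 0.7, 0.2]]) and k=2, _pred_topk returns
# (array([[1, 2]]), array([[0.7, 0.2]])), and _accuracy_topk(np.array([2]), y_prob, k=2)
# evaluates to 1.0 because the true label 2 appears among the top-2 predictions.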
class CMRC2018Metric(MetricBase):
r"""
    Evaluation metric for the CMRC 2018 reading-comprehension task.
"""
def __init__(self, answers=None, raw_chars=None, context_len=None, pred_start=None, pred_end=None):
super().__init__()
self._init_param_map(answers=answers, raw_chars=raw_chars, context_len=context_len, pred_start=pred_start,
pred_end=pred_end)
self.em = 0
self.total = 0
self.f1 = 0
def evaluate(self, answers, raw_chars, pred_start, pred_end, context_len=None):
r"""
        :param list[str] answers: e.g. [["answer 1", "answer 2", "answer 3"], [...], ...]
        :param list[str] raw_chars: [["这", "是", ...], [...]]
        :param tensor pred_start: batch_size x length or batch_size,
        :param tensor pred_end: batch_size x length or batch_size (inclusive: the end position is part of the span),
        :param tensor context_len: context lengths, batch_size
:return:
"""
if pred_start.dim() > 1:
batch_size, max_len = pred_start.size()
context_mask = seq_len_to_mask(context_len, max_len=max_len).eq(False)
pred_start.masked_fill_(context_mask, float('-inf'))
pred_end.masked_fill_(context_mask, float('-inf'))
max_pred_start, pred_start_index = pred_start.max(dim=-1, keepdim=True) # batch_size,
            pred_start_mask = pred_start.eq(max_pred_start).cumsum(dim=-1).eq(0)  # the end can only be predicted at or after the chosen start
pred_end.masked_fill_(pred_start_mask, float('-inf'))
pred_end_index = pred_end.argmax(dim=-1) + 1
else:
pred_start_index = pred_start
pred_end_index = pred_end + 1
pred_ans = []
for index, (start, end) in enumerate(zip(pred_start_index.flatten().tolist(), pred_end_index.tolist())):
pred_ans.append(''.join(raw_chars[index][start:end]))
for answer, pred_an in zip(answers, pred_ans):
pred_an = pred_an.strip()
self.f1 += _calc_cmrc2018_f1_score(answer, pred_an)
self.total += 1
self.em += _calc_cmrc2018_em_score(answer, pred_an)
def get_metric(self, reset=True):
eval_res = {'f1': round(self.f1 / self.total*100, 2), 'em': round(self.em / self.total*100, 2)}
if reset:
self.em = 0
self.total = 0
self.f1 = 0
return eval_res
# split Chinese
def _cn_segmentation(in_str, rm_punc=False):
in_str = str(in_str).lower().strip()
segs_out = []
temp_str = ""
sp_char = {'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':', '?', '!', '“', '”', ';', '’', '《',
'》', '……', '·', '、', '「', '」', '(', ')', '-', '~', '『', '』'}
for char in in_str:
if rm_punc and char in sp_char:
continue
if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
if temp_str != "":
ss = list(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
# handling last part
if temp_str != "":
ss = list(temp_str)
segs_out.extend(ss)
return segs_out
# remove punctuation
def _remove_punctuation(in_str):
in_str = str(in_str).lower().strip()
sp_char = ['-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=',
',', '。', ':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、',
'「', '」', '(', ')', '-', '~', '『', '』']
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
# find longest common string
def _find_lcs(s1, s2):
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > mmax:
mmax = m[i + 1][j + 1]
p = i + 1
return s1[p - mmax:p], mmax
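# Example (illustrative only): _find_lcs('abcde', 'xbcdy') returns ('bcd', 3), i.e. the
# longest *contiguous* common subsequence of the two inputs and its length;
# _calc_cmrc2018_f1_score below uses that length as the token overlap between each
# gold answer and the prediction.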
def _calc_cmrc2018_f1_score(answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = _cn_segmentation(ans, rm_punc=True)
prediction_segs = _cn_segmentation(prediction, rm_punc=True)
lcs, lcs_len = _find_lcs(ans_segs, prediction_segs)
if lcs_len == 0:
f1_scores.append(0)
continue
precision = 1.0 * lcs_len / len(prediction_segs)
recall = 1.0 * lcs_len / len(ans_segs)
f1 = (2 * precision * recall) / (precision + recall)
f1_scores.append(f1)
return max(f1_scores)
def _calc_cmrc2018_em_score(answers, prediction):
em = 0
for ans in answers:
ans_ = _remove_punctuation(ans)
prediction_ = _remove_punctuation(prediction)
if ans_ == prediction_:
em = 1
break
return em
| [
"torch.eq",
"torch.ones_like",
"torch.sum"
] | 1.0.0 | pinkw/fastNLP | ff8b9a37a71e9b7f7787df8a230446d483b5dfdf |
1.4 | import traceback
from torch.autograd import grad
from learn2learn.algorithms.base_learner import BaseLearner
from learn2learn.utils import clone_module
def maml_update(model, lr, grads=None):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/algorithms/maml.py)
**Description**
Performs a MAML update on model using grads and lr.
The function re-routes the Python object, thus avoiding in-place
operations.
NOTE: The model itself is updated in-place (no deepcopy), but the
parameters' tensors are not.
**Arguments**
* **model** (Module) - The model to update.
* **lr** (float) - The learning rate used to update the model.
* **grads** (list, *optional*, default=None) - A list of gradients for each parameter
of the model. If None, will use the gradients in .grad attributes.
**Example**
~~~python
maml = l2l.algorithms.MAML(Model(), lr=0.1)
model = maml.clone() # The next two lines essentially implement model.adapt(loss)
grads = autograd.grad(loss, model.parameters(), create_graph=True)
    maml_update(model, lr=0.1, grads=grads)
~~~
"""
if grads is not None:
params = list(model.parameters())
if not len(grads) == len(list(params)):
msg = 'WARNING:maml_update(): Parameters and gradients have different length. ('
msg += str(len(params)) + ' vs ' + str(len(grads)) + ')'
print(msg)
for p, g in zip(params, grads):
p.grad = g
# Update the params
for param_key in model._parameters:
p = model._parameters[param_key]
if p is not None and p.grad is not None:
model._parameters[param_key] = p - lr * p.grad
# Second, handle the buffers if necessary
for buffer_key in model._buffers:
buff = model._buffers[buffer_key]
if buff is not None and buff.grad is not None:
model._buffers[buffer_key] = buff - lr * buff.grad
# Then, recurse for each submodule
for module_key in model._modules:
model._modules[module_key] = maml_update(model._modules[module_key],
lr=lr,
grads=None)
# Finally, rebuild the flattened parameters for RNNs
# See this issue for more details:
# https://github.com/learnables/learn2learn/issues/139
model._apply(lambda x: x)
return model
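# Minimal meta-training sketch (assumptions: a `model`, an outer optimizer `opt`, a
# `loss_fn` and task batches exist -- none of these come from this file). It uses the
# MAML wrapper defined below:
#
#   maml = MAML(model, lr=0.5, first_order=False)
#   for x_support, y_support, x_query, y_query in tasks:
#       learner = maml.clone()                                   # differentiable copy
#       learner.adapt(loss_fn(learner(x_support), y_support))    # inner-loop step via maml_update
#       meta_loss = loss_fn(learner(x_query), y_query)           # outer-loop objective
#       opt.zero_grad(); meta_loss.backward(); opt.step()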
class MAML(BaseLearner):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/algorithms/maml.py)
**Description**
High-level implementation of *Model-Agnostic Meta-Learning*.
This class wraps an arbitrary nn.Module and augments it with `clone()` and `adapt()`
methods.
For the first-order version of MAML (i.e. FOMAML), set the `first_order` flag to `True`
upon initialization.
**Arguments**
* **model** (Module) - Module to be wrapped.
* **lr** (float) - Fast adaptation learning rate.
* **first_order** (bool, *optional*, default=False) - Whether to use the first-order
approximation of MAML. (FOMAML)
* **allow_unused** (bool, *optional*, default=None) - Whether to allow differentiation
of unused parameters. Defaults to `allow_nograd`.
* **allow_nograd** (bool, *optional*, default=False) - Whether to allow adaptation with
parameters that have `requires_grad = False`.
**References**
1. Finn et al. 2017. "Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks."
**Example**
~~~python
linear = l2l.algorithms.MAML(nn.Linear(20, 10), lr=0.01)
clone = linear.clone()
error = loss(clone(X), y)
clone.adapt(error)
error = loss(clone(X), y)
error.backward()
~~~
"""
def __init__(self,
model,
lr,
first_order=False,
allow_unused=None,
allow_nograd=False):
super(MAML, self).__init__()
self.module = model
self.lr = lr
self.first_order = first_order
self.allow_nograd = allow_nograd
if allow_unused is None:
allow_unused = allow_nograd
self.allow_unused = allow_unused
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
def adapt(self,
loss,
first_order=None,
allow_unused=None,
allow_nograd=None):
"""
**Description**
Takes a gradient step on the loss and updates the cloned parameters in place.
**Arguments**
* **loss** (Tensor) - Loss to minimize upon update.
* **first_order** (bool, *optional*, default=None) - Whether to use first- or
second-order updates. Defaults to self.first_order.
* **allow_unused** (bool, *optional*, default=None) - Whether to allow differentiation
of unused parameters. Defaults to self.allow_unused.
* **allow_nograd** (bool, *optional*, default=None) - Whether to allow adaptation with
parameters that have `requires_grad = False`. Defaults to self.allow_nograd.
"""
if first_order is None:
first_order = self.first_order
if allow_unused is None:
allow_unused = self.allow_unused
if allow_nograd is None:
allow_nograd = self.allow_nograd
second_order = not first_order
if allow_nograd:
# Compute relevant gradients
diff_params = [p for p in self.module.parameters() if p.requires_grad]
grad_params = grad(loss,
diff_params,
retain_graph=second_order,
create_graph=second_order,
allow_unused=allow_unused)
gradients = []
grad_counter = 0
# Handles gradients for non-differentiable parameters
for param in self.module.parameters():
if param.requires_grad:
gradient = grad_params[grad_counter]
grad_counter += 1
else:
gradient = None
gradients.append(gradient)
else:
try:
gradients = grad(loss,
self.module.parameters(),
retain_graph=second_order,
create_graph=second_order,
allow_unused=allow_unused)
except RuntimeError:
traceback.print_exc()
print('learn2learn: Maybe try with allow_nograd=True and/or allow_unused=True ?')
# Update the module
self.module = maml_update(self.module, self.lr, gradients)
def clone(self, first_order=None, allow_unused=None, allow_nograd=None):
"""
**Description**
Returns a `MAML`-wrapped copy of the module whose parameters and buffers
are `torch.clone`d from the original module.
This implies that back-propagating losses on the cloned module will
populate the buffers of the original module.
For more information, refer to learn2learn.clone_module().
**Arguments**
* **first_order** (bool, *optional*, default=None) - Whether the clone uses first-
or second-order updates. Defaults to self.first_order.
* **allow_unused** (bool, *optional*, default=None) - Whether to allow differentiation
of unused parameters. Defaults to self.allow_unused.
* **allow_nograd** (bool, *optional*, default=False) - Whether to allow adaptation with
parameters that have `requires_grad = False`. Defaults to self.allow_nograd.
"""
if first_order is None:
first_order = self.first_order
if allow_unused is None:
allow_unused = self.allow_unused
if allow_nograd is None:
allow_nograd = self.allow_nograd
return MAML(clone_module(self.module),
lr=self.lr,
first_order=first_order,
allow_unused=allow_unused,
allow_nograd=allow_nograd) | [
"torch.autograd.grad"
] | 1.4.0 | JuliousHurtado/Meta-Iteration | 8edf09510c9c8c300c8ca42472e7e04bfd790938 |
1.7 | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter, proto
import distributed
from models.reporter_ext import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
import json
import copy
from train_abstractive import baseline
def np_encoder(object):
if isinstance(object, np.generic):
return object.item()
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
def build_trainer(args, device_id, model, optim):
"""
    Simplify `Trainer` creation based on user `opt`s.
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
grad_accum_count = args.accum_count
n_gpu = args.world_size
if device_id >= 0:
gpu_rank = int(args.gpu_ranks[device_id])
else:
gpu_rank = 0
n_gpu = 0
print('gpu_rank %d' % gpu_rank)
tensorboard_log_dir = args.model_path
writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)
trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)
# print(tr)
if (model):
n_params = _tally_parameters(model)
logger.info('* number of parameters: %d' % n_params)
return trainer
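# Illustrative call (assumption: an argparse `args` namespace providing accum_count,
# world_size, gpu_ranks, model_path, report_every and train_steps, plus a model and
# optimizer built elsewhere -- not shown in this file):
#
#   trainer = build_trainer(args, device_id=0, model=model, optim=optim)
#   trainer.train(train_iter_fct, args.train_steps)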
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.models.model.NMTModel`): translation model
to train
train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.utils.optimizers.Optimizer`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
report_manager(:obj:`onmt.utils.ReportMgrBase`):
the object that creates reports, or None
model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
used to save a checkpoint.
Thus nothing will be saved if this parameter is None
"""
def __init__(self, args, model, optim,
grad_accum_count=1, n_gpu=1, gpu_rank=1,
report_manager=None):
# Basic attributes.
self.args = args
self.save_checkpoint_steps = args.save_checkpoint_steps
self.model = model
self.optim = optim
self.grad_accum_count = grad_accum_count
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.report_manager = report_manager
self.loss = torch.nn.BCELoss(reduction='none')
assert grad_accum_count > 0
# Set model in training mode.
if (model):
self.model.train()
def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):
"""
The main training loops.
by iterating over training data (i.e. `train_iter_fct`)
and running validation (i.e. iterating over `valid_iter_fct`
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Return:
None
"""
logger.info('Start training...')
# step = self.optim._step + 1
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = Statistics()
report_stats = Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
true_batchs.append(batch)
normalization += batch.batch_size
print(batch.batch_size)
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.n_gpu > 1:
normalization = sum(distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):
self._save(step)
step += 1
if step > train_steps:
break
#if (i % self.n_gpu == self.gpu_rank) and (step % (10 * self.args.report_every) == 0):
# valid_iter = valid_iter_fct()
# self.validate(valid_iter, step=step)
# self.model.train()
train_iter = train_iter_fct()
return total_stats
def validate(self, valid_iter, step=0):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
with torch.no_grad():
for batch in valid_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
self._report_step(0, step, valid_stats=stats)
return stats
def test(self, test_iter, step, cal_lead=False, cal_oracle=False):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
def _get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _block_tri(c, p):
tri_c = _get_ngrams(3, c.split())
for s in p:
tri_s = _get_ngrams(3, s.split())
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
if (not cal_lead and not cal_oracle):
self.model.eval()
stats = Statistics()
can_path = '%s_step%d.candidate' % (self.args.result_path, step)
gold_path = '%s_step%d.gold' % (self.args.result_path, step)
output_path = '%s.outputs' % (self.args.result_path)
proto_highlights = []
highlights_baseline = []
highlights_labels = []
with open(can_path, 'w') as save_pred:
with open(gold_path, 'w') as save_gold:
with torch.no_grad():
for batch in test_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
query = batch.query
cnndm_labels = list(np.nonzero(labels[0].cpu().numpy())[0])
if query[0] is not None:
baseline_scores = []
for i, sentence in enumerate(batch.src_str[0]):
count = 0
for query_word in query[0]:
if query_word.lower() in sentence.lower():
count += 1
baseline_scores.append((i, count))
keyword_baseline_ids = (sorted(baseline_scores, key=lambda x: x[1], reverse=True))[:3]
keyword_baseline_ids = ([x[0] for x in keyword_baseline_ids])
gold = []
pred = []
ids = []
if (cal_lead):
selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
elif (cal_oracle):
selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
range(batch.batch_size)]
else:
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
if (len(sent_scores.shape) == 1):
sent_scores = sent_scores.unsqueeze(0)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
sent_scores = sent_scores + mask.float()
sent_scores = sent_scores.cpu().data.numpy()
selected_ids = np.argsort(-sent_scores, 1)
# selected_ids = np.sort(selected_ids,1)
for i, idx in enumerate(selected_ids):
_pred = []
_ids = []
if (len(batch.src_str[i]) == 0):
continue
for j in selected_ids[i][:len(batch.src_str[i])]:
_ids.append(j)
if (j >= len(batch.src_str[i])):
continue
candidate = batch.src_str[i][j].strip()
if (self.args.block_trigram):
if (not _block_tri(candidate, _pred)):
_pred.append(candidate)
else:
_pred.append(candidate)
if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):
break
_pred = '<q>'.join(_pred)
if (self.args.recall_eval):
_pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])
pred.append(_pred)
gold.append(batch.tgt_str[i])
ids.append(_ids)
#print(ids)
proto_highlights.append({"ids": ids[0], "query": query[0], "text": batch.src_str[0]})
if query[0] is not None:
highlights_baseline.append({"ids": keyword_baseline_ids, "query": query[0], "text": batch.src_str[0]})
highlights_labels.append({"ids": cnndm_labels, "query": query[0], "text": batch.src_str[0]})
for i in range(len(gold)):
save_gold.write(gold[i].strip() + '\n')
for i in range(len(pred)):
save_pred.write(pred[i].strip() + '\n')
with open(output_path, 'w') as f:
json.dump(proto_highlights, f, default=np_encoder)
with open(output_path + ".keyword_baseline", 'w') as f:
json.dump(highlights_baseline, f, default=np_encoder)
with open(output_path + ".cnndm_baseline", 'w') as f:
json.dump(highlights_labels, f, default=np_encoder)
if (step != -1 and self.args.report_rouge):
rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
self._report_step(0, step, valid_stats=stats)
return stats
def _gradient_accumulation(self, true_batchs, normalization, total_stats,
report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
for batch in true_batchs:
if self.grad_accum_count == 1:
self.model.zero_grad()
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
(loss / loss.numel()).backward()
# loss.div(float(normalization)).backward()
batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# 4. Update the parameters and statistics.
if self.grad_accum_count == 1:
# Multi GPU gradient gather
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
# in case of multi step gradient accumulation,
# update only after accum batches
if self.grad_accum_count > 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
def _save(self, step):
real_model = self.model
# real_generator = (self.generator.module
# if isinstance(self.generator, torch.nn.DataParallel)
# else self.generator)
model_state_dict = real_model.state_dict()
# generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
# 'generator': generator_state_dict,
'opt': self.args,
'optim': self.optim,
}
checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
logger.info("Saving checkpoint %s" % checkpoint_path)
# checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)
if (not os.path.exists(checkpoint_path)):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step)
| [
"torch.no_grad",
"torch.save",
"torch.nn.BCELoss"
] | 1.7.1 | oja/qfsumm | dfa3541cfad928df412c86888ef0354ea97e8382 |
1.9 | import torch
from torch import nn
from hw_asr.base import BaseModel
class DeepSpeechModel(BaseModel):
def __init__(self, n_feats, n_class, hidden_size, n_layers, dropout,
*args, **kwargs):
super().__init__(n_feats, n_class, *args, **kwargs)
self.convolutional = nn.Sequential(
nn.Conv2d(1, 32, (41, 11), stride=(2, 2), padding=(20, 5)),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 32, (21, 11), stride=(2, 1), padding=(10, 5)),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 96, (21, 11), stride=(2, 1), padding=(10, 5)),
nn.BatchNorm2d(96),
nn.ReLU()
)
rnn_input_size = 96 * n_feats // 8
self.lstm = nn.LSTM(input_size=rnn_input_size, hidden_size=hidden_size,
num_layers=n_layers, batch_first=True,
dropout=dropout, bidirectional=True)
self.bn = nn.BatchNorm1d(2 * hidden_size)
self.fc = nn.Linear(2 * hidden_size, n_class)
def forward(self, spectrogram, *args, **kwargs):
        x = self.convolutional(torch.transpose(spectrogram, 1, 2).unsqueeze(1))
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2],
sizes[3]) # Collapse feature dimension
x = x.transpose(1, 2)
lstm_out, _ = self.lstm(x)
lstm_out = torch.transpose(lstm_out, 1, 2)
x = self.bn(lstm_out)
x = torch.transpose(x, 1, 2)
x = self.fc(x)
return {"logits": x}
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
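# Shape sketch (illustrative; assumes the spectrogram batch arrives as
# [batch, time, n_feats], e.g. n_feats=128 mel bins): the three convolutions shrink the
# feature axis by 8x (so rnn_input_size = 96 * 128 // 8 = 1536) and the time axis by 2x,
# which is why transform_input_lengths halves the CTC input lengths.
#
#   model = DeepSpeechModel(n_feats=128, n_class=28, hidden_size=512, n_layers=3, dropout=0.1)
#   logits = model(spectrogram=torch.zeros(4, 200, 128))["logits"]   # -> [4, 100, 28]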
| [
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.transpose"
] | 1.9.1 | WhiteTeaDragon/hw-asr | 78a767ab00a743b8d28d1fdad795f066fc0795da |
1.5 | """
File: image_io.py
Author: Nrupatunga
Email: [email protected]
Github: https://github.com/nrupatunga
Description: Image IO
"""
import numpy as np
import torch
from PIL import Image
from torchvision import get_image_backend
try:
import accimage
except ImportError:
accimage = None
def _pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def _accimage_loader(path):
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return _pil_loader(path)
def _is_pil_image(img):
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def image_to_tensor(pic, scale=255):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not(_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float().div(scale)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = scale * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(scale)
else:
return img
def load_grayscale(path):
if get_image_backend() == 'accimage':
img = _accimage_loader(path)
else:
img = _pil_loader(path)
channels = img.split()
if len(channels) == 3:
img = Image.merge("RGB", [channels[2], channels[1], channels[0]])
return img.convert('L')
def load_grayscale_np(path):
if get_image_backend() == 'accimage':
img = _accimage_loader(path)
else:
img = _pil_loader(path)
channels = img.split()
if len(channels) == 3:
img = Image.merge("RGB", [channels[2], channels[1], channels[0]])
return np.asarray(img.convert('L'))
def load(path, image_size=None):
if get_image_backend() == 'accimage':
img = _accimage_loader(path)
else:
img = _pil_loader(path)
channels = img.split()
if len(channels) == 1:
img = img.convert('L')
else: # Make sure it is BGR
img = Image.merge("RGB", [channels[2], channels[1], channels[0]])
if image_size is not None:
if (image_size[0] == 1 and len(channels) == 3):
img = img.convert('L')
if image_size[1] == img.width and image_size[2] == img.height:
return img
return img.resize((image_size[1], image_size[2]), Image.BILINEAR)
else:
return img
def load_np(path, image_size=None):
if get_image_backend() == 'accimage':
img = _accimage_loader(path)
else:
img = _pil_loader(path)
channels = img.split()
if len(channels) == 1:
img = img.convert('L')
else: # Make sure it is BGR
img = Image.merge("RGB", [channels[2], channels[1], channels[0]])
if image_size is not None:
if (image_size[0] == 1 and len(channels) == 3):
img = img.convert('L')
if image_size[1] == img.width and image_size[2] == img.height:
return img
return np.asarray(img.resize((image_size[1], image_size[2]),
Image.BILINEAR))
else:
return np.asarray(img)
def resize(img, size):
"""resize numpy array
"""
if _is_numpy_image(img):
img = Image.fromarray(img)
return img.resize(size, Image.BILINEAR)
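# Usage sketch (illustrative; the file name is a placeholder): load an image in BGR
# channel order, resize it to 224x224 and turn it into a [0, 1]-scaled float tensor.
#
#   img = load('example.jpg', image_size=(3, 224, 224))
#   tensor = image_to_tensor(img)    # shape [3, 224, 224], dtype float32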
| [
"torch.is_tensor",
"torch.from_numpy"
] | 1.5.0 | nrupatunga/pytorch-deaf | 751a37669e78f6671a26cb5cff42c05139bf3c41 |
1.11 | import logging
import os
import numpy as np
from tqdm import tqdm
import argparse
from pprint import pprint, pformat
import time
import nltk
import torch
from torch.utils.data import Dataset, DataLoader
from data_provider.utils import GloveTokenizer
from config_file import *
class CMUDoGDataset(Dataset):
def __init__(self, args, dial_path, glove_path=None, char_path=None, data_cache_path=None):
self.args = args
super(CMUDoGDataset, self).__init__()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.CRITICAL,
)
self.logger = logging.getLogger('data_provider')
self.tokenizer = GloveTokenizer(glove_path=glove_path)
# if True:
if args.debug or not os.path.exists(data_cache_path):
start = time.time()
self.contexts, self.candidate_lists, self.sparse_labels, self.document_lists, self.n_turns, self.n_documents = [], [], [], [], [], []
self.statistics = {'min_#token/context': 99999, 'aver_#token/context': 0, 'max_#token/context': 0,
'min_#token/document': 99999, 'aver_#token/document': 0, 'max_#token/document': 0,
'min_#token/response': 99999, 'aver_#token/response': 0, 'max_#token/response': 0}
with open(dial_path) as f:
for index, line in enumerate(f):
if args.debug and index > 100:
break
fields = line.strip().split('\t')
context = (fields[0] + ' ').split(' _eos_ ')[:-1]
candidate_list = fields[1].split('|')
label = int(fields[2])
document_list = fields[3].split('|')
self.contexts.append(context)
self.candidate_lists.append(candidate_list)
self.sparse_labels.append(label)
self.document_lists.append(document_list)
self.n_turns.append(len(context))
self.n_documents.append(len(document_list))
self.context_lens, self.document_lens, self.candidate_list_lens = None, None, None
self.context_lens, self.document_lens, self.candidate_list_lens = self.w2v_preprocessing()
self.samples = self.make_samples()
cache = {'samples': self.samples,
'statistics': self.statistics}
end = time.time()
self.logger.info('Preprocessing done, costing %s mins' % ((end - start) / 60))
if not args.debug:
torch.save(cache, data_cache_path)
else:
start = time.time()
self.logger.info('loading cache from [%s]' % data_cache_path)
cache = torch.load(data_cache_path)
self.samples = cache['samples']
self.statistics = cache['statistics']
end = time.time()
self.logger.info('Cache loaded, costing %s mins' % ((end - start) / 60))
self.logger.info(pformat(self.statistics, indent=4))
def w2v_preprocessing(self):
# tokenization & numericalization
## tokenize & numericalize dials and docs
self.logger.debug('tokenzing & numericalizing dialogues and docs...')
def tokenize_and_numericalize(sentence_lists):
all_sentence_lists = []
for i in tqdm(range(len(sentence_lists))):
preprocessed_sentence_list = []
for j in range(len(sentence_lists[i])):
tokens = self.tokenizer.tokenize(sentence_lists[i][j])
tokenized_utterance = self.tokenizer.convert_tokens_to_ids(tokens)
preprocessed_sentence_list.append(tokenized_utterance)
all_sentence_lists.append(preprocessed_sentence_list)
return all_sentence_lists
self.contexts = tokenize_and_numericalize(self.contexts)
self.candidate_lists = tokenize_and_numericalize(self.candidate_lists)
self.document_lists = tokenize_and_numericalize(self.document_lists)
# truncate and pad
## truncate & pad dials and docs
context_lens, document_lens, candidate_list_lens = [], [], []
## dial
for i in tqdm(range(len(self.contexts))):
context_len = []
self.contexts[i] = self.contexts[i][-self.args.max_turn_num:]
for j in range(len(self.contexts[i])):
self.contexts[i][j] = self.contexts[i][j][:self.args.seq_len]
context_len.append(len(self.contexts[i][j]))
self.contexts[i][j] += [0] * (self.args.seq_len - len(self.contexts[i][j]))
self.contexts[i] = [[0] * self.args.seq_len] * (self.args.max_turn_num - len(self.contexts[i])) + self.contexts[i]
context_len = [0] * (self.args.max_turn_num - len(context_len)) + context_len
context_lens.append(context_len)
for i in tqdm(range(len(self.candidate_lists))):
candidate_list_len = []
for j in range(len(self.candidate_lists[i])):
self.candidate_lists[i][j] = self.candidate_lists[i][j][:self.args.seq_len]
candidate_list_len.append(len(self.candidate_lists[i][j]))
self.candidate_lists[i][j] += [0] * (self.args.seq_len - len(self.candidate_lists[i][j]))
candidate_list_lens.append(candidate_list_len)
## docs
self.logger.debug('truncating and padding documents...')
for i in tqdm(range(len(self.document_lists))):
document_len = []
self.document_lists[i] = self.document_lists[i][:self.args.max_doc_num]
for j in range(len(self.document_lists[i])):
self.document_lists[i][j] = self.document_lists[i][j][:self.args.doc_len]
document_len.append(len(self.document_lists[i][j]))
self.document_lists[i][j] += [0] * (self.args.doc_len - len(self.document_lists[i][j]))
self.document_lists[i] += [[0] * self.args.doc_len] * (self.args.max_doc_num - len(self.document_lists[i]))
document_len += [0] * (self.args.max_doc_num - len(document_len))
document_lens.append(document_len)
return context_lens, document_lens, candidate_list_lens
def make_samples(self):
samples = []
for i in range(len(self.contexts)):
# for context, candidate_list, document_list, sparse_label, n_turn, n_document, context_len, document_len, candidate_list_len in zip(self.contexts, self.candidate_lists, self.document_lists, self.context_chars, self.candidate_chars, self.document_chars, self.sparse_labels, self.n_turns, self.n_documents, self.context_lens, self.document_lens, self.candidate_list_lens):
for index, (candidate, response_len) in enumerate(zip(self.candidate_lists[i], self.candidate_list_lens[i])):
label = 1 if index == self.sparse_labels[i] else 0
sample = {'context': self.contexts[i],
'document': self.document_lists[i],
'response': candidate,
'context_len': self.context_lens[i],
'document_len': self.document_lens[i],
'response_len': response_len,
'label': label,
'n_turn': min(self.n_turns[i], self.args.max_turn_num),
'n_document': min(self.n_documents[i], self.args.max_doc_num)}
samples.append(sample)
return samples
def __getitem__(self, index):
sample = self.samples[index]
context = torch.tensor(sample['context'], dtype=torch.long)
response = torch.tensor(sample['response'], dtype=torch.long)
document = torch.tensor(sample['document'], dtype=torch.long)
context_len = torch.tensor(sample['context_len'], dtype=torch.long)
response_len = torch.tensor(sample['response_len'], dtype=torch.long)
document_len = torch.tensor(sample['document_len'], dtype=torch.long)
n_turn = torch.tensor(sample['n_turn'], dtype=torch.int)
n_document = torch.tensor(sample['n_document'], dtype=torch.int)
label = torch.tensor(sample['label'], dtype=torch.long)
pos1d_ids = torch.tensor(np.arange(0, len(document)), dtype=torch.long)
inputs = {'context': context,
'response': response,
'document': document,
'context_len': context_len,
'document_len': document_len,
'response_len': response_len,
'label': label,
'pos1d_ids': pos1d_ids,
'n_turn': n_turn,
'n_document': n_document}
return inputs
def __len__(self):
return len(self.samples)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--doc_len", default=40, type=int, help='Maximum #tokens/doc')
parser.add_argument("--seq_len", default=40, type=int, help='Maximum #tokens/turn')
parser.add_argument("--max_turn_num", default=4, type=int, help='Maximum #turn')
parser.add_argument("--max_doc_num", default=20, type=int, help='Maximum #doc')
args = parser.parse_args()
dataset = CMUDoGDataset(args=args,
dial_path=cmudog_train_dial_path,
glove_path=cmudog_glove_path,
data_cache_path='temp.pkl')
dataloader = DataLoader(dataset=dataset,
batch_size=40,
shuffle=False)
for index, inputs in enumerate(dataloader):
print(inputs)
for key, value in inputs.items():
print(key, ':', value.shape)
| [
"torch.save",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.11.0 | Coldog2333/DGMN-pytorch | c34248afca516625c2ac2fc6d6f4ce8fe2988c99 |
1.7 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import time
import numpy as np
import torch
import torch.nn as nn
# device = 'cpu'
# if torch.cuda.is_available():
# device = 'cuda'
"""
BLSTM (max/mean) encoder
"""
class InferSent(nn.Module):
def __init__(self, config, device='cuda'):
super(InferSent, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.version = 1 if 'version' not in config else config['version']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
assert self.version in [1, 2]
if self.version == 1:
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
elif self.version == 2:
self.bos = '<p>'
self.eos = '</p>'
self.max_pad = False
self.moses_tok = True
self.device = device
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return self.enc_lstm.bias_hh_l0.data.is_cuda
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: (seqlen x bsize x worddim)
sent, sent_len = sent_tuple
# Sort by length (keep idx)
sent_len_sorted, idx_sort = torch.sort(sent_len, descending=True)
# sent_len_sorted = sent_len_sorted.copy()
idx_unsort = torch.sort(idx_sort)[1]
idx_sort = idx_sort.to(self.device)
sent = sent.index_select(1, idx_sort)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = idx_unsort.to(self.device)
sent_output = sent_output.index_select(1, idx_unsort)
# Pooling
if self.pool_type == "mean":
            sent_len = sent_len.float().unsqueeze(1).to(self.device)
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
if not self.max_pad:
sent_output[sent_output == 0] = -1e9
emb = torch.max(sent_output, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2
return emb, sent_output.permute(1, 0, 2)
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# udpate vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)' % (
len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
        # `lengths` is a numpy array at this point; sort it (descending) as a tensor
        lengths = torch.from_numpy(lengths)
        lengths, idx_sort = torch.sort(lengths, descending=True)
        sentences = np.array(sentences)[idx_sort.numpy()]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = self.get_batch(sentences[stidx:stidx + bsize])
if self.is_cuda():
batch = batch.to(self.device)
with torch.no_grad():
batch = self.forward(
(batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
        idx_unsort = torch.sort(idx_sort)[1].numpy()
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings) / (time.time() - tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
sent = sent.split() if not tokenize else self.tokenize(sent)
sent = [
[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
import warnings
warnings.warn('No words in "%s" have w2v vectors. Replacing \
by "%s %s"..' % (sent, self.bos, self.eos))
batch = self.get_batch(sent)
if self.is_cuda():
batch = batch.to(self.device)
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
return output, idxs
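# Sentence-embedding sketch (illustrative; the GloVe path and config values are
# assumptions, not taken from this file):
#
#   params = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048, 'pool_type': 'max',
#             'dpout_model': 0.0, 'n_enc_layers': 1, 'version': 1}
#   model = InferSent(params, device='cpu')
#   model.set_w2v_path('glove.840B.300d.txt')
#   model.build_vocab_k_words(K=100000)
#   embeddings = model.encode(['A minimal example sentence.'], bsize=64, tokenize=True)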
"""
BiGRU encoder (first/last hidden states)
"""
class BGRUlastEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(BGRUlastEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.GRU(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: seqlen x batch x worddim
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed)
emb = torch.cat((hn[0], hn[1]), 1) # batch x 2*nhid
sent_output_un, _ = torch.nn.utils.rnn.pad_packed_sequence(sent_output,
batch_first=True)
# Un-sort by length
        idx_unsort = np.argsort(idx_sort)
emb = emb.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output_un
"""
BLSTM encoder with projection after BiLSTM
"""
class BLSTMprojEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(BLSTMprojEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
self.proj_enc = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = self.proj_enc(sent_output.view(-1, 2 * self.enc_lstm_dim)).view(
-1, bsize, 2 * self.enc_lstm_dim)
# Pooling
if self.pool_type == "mean":
sent_len = torch.FloatTensor(sent_len).unsqueeze(1).to(self.device)
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
emb = torch.max(sent_output, 0)[0].squeeze(0)
return emb, sent_output.permute(1, 0, 2)
"""
LSTM encoder
"""
class LSTMEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(LSTMEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=False, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len [max_len, ..., min_len] (batch)
# sent (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
# Handling padding in Recurrent Networks
sent_len = np.array(sent_len)
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed) # batch x 2*nhid
sent_hn = hn[0].squeeze(0)
# Un-sort by length
sent_output_un, _ = torch.nn.utils.rnn.pad_packed_sequence(sent_output,
batch_first=True)
idx_unsort = np.argsort(idx_sort)
emb = sent_hn.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output_un
"""
GRU encoder
"""
class GRUEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(GRUEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.GRU(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=False, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed)
sent_hn = hn.squeeze(0)
# batch x 2*nhid
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
emb = sent_hn.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output
"""
Inner attention from "hierarchical attention for document classification"
"""
class InnerAttentionNAACLEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionNAACLEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'], bidirectional=True)
self.proj_key = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.query_embedding = nn.Embedding(1, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_key_proj = self.proj_key(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1, 2 * self.enc_lstm_dim)
sent_key_proj = torch.tanh(sent_key_proj)
# NAACL paper: u_it=tanh(W_w.h_it + b_w) (bsize, seqlen, 2nhid)
sent_w = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(
2) # (bsize, 2*nhid, 1)
Temp = 2
keys = sent_key_proj.bmm(sent_w).squeeze(2) / Temp
# Set probas of padding to zero in softmax
keys = keys + ((keys == 0).float() * -10000)
alphas = self.softmax(keys / Temp).unsqueeze(2).expand_as(sent_output)
# if int(time.time()) % 100 == 0:
# print('w', torch.max(sent_w), torch.min(sent_w))
# print('alphas', alphas[0, :, 0])
emb = torch.sum(alphas * sent_output_proj, 1).squeeze(1)
return emb, sent_output
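# Sketch of the attention step used above, in isolation (all shapes and tensors here are
# illustrative assumptions): per-timestep keys u_t = tanh(W h_t) are scored against a
# learned query w, the scores are softmax-normalised, and the embedding is the weighted sum.
#
#   h = torch.randn(4, 10, 512)                          # (bsize, seqlen, 2*nhid)
#   u = torch.tanh(h)                                    # stands in for proj_key
#   w = torch.randn(4, 512, 1)                           # one query per batch element
#   alphas = torch.softmax(u.bmm(w).squeeze(2), dim=1)   # (bsize, seqlen)
#   emb = (alphas.unsqueeze(2) * h).sum(1)               # (bsize, 2*nhid)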
"""
Inner attention inspired from "Self-attentive ..."
"""
class InnerAttentionMILAEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionMILAEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'], bidirectional=True)
self.proj_key = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.query_embedding = nn.Embedding(2, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
        self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_key_proj = self.proj_key(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1, 2 * self.enc_lstm_dim)
sent_key_proj = torch.tanh(sent_key_proj)
# NAACL : u_it=tanh(W_w.h_it + b_w) like in NAACL paper
# Temperature
Temp = 3
sent_w1 = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys1 = sent_key_proj.bmm(sent_w1).squeeze(2) / Temp
keys1 = keys1 + ((keys1 == 0).float() * -1000)
alphas1 = self.softmax(keys1).unsqueeze(2).expand_as(sent_key_proj)
emb1 = torch.sum(alphas1 * sent_output_proj, 1).squeeze(1)
sent_w2 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys2 = sent_key_proj.bmm(sent_w2).squeeze(2) / Temp
keys2 = keys2 + ((keys2 == 0).float() * -1000)
alphas2 = self.softmax(keys2).unsqueeze(2).expand_as(sent_key_proj)
emb2 = torch.sum(alphas2 * sent_output_proj, 1).squeeze(1)
sent_w3 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys3 = sent_key_proj.bmm(sent_w3).squeeze(2) / Temp
keys3 = keys3 + ((keys3 == 0).float() * -1000)
alphas3 = self.softmax(keys3).unsqueeze(2).expand_as(sent_key_proj)
emb3 = torch.sum(alphas3 * sent_output_proj, 1).squeeze(1)
sent_w4 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys4 = sent_key_proj.bmm(sent_w4).squeeze(2) / Temp
keys4 = keys4 + ((keys4 == 0).float() * -1000)
alphas4 = self.softmax(keys4).unsqueeze(2).expand_as(sent_key_proj)
emb4 = torch.sum(alphas4 * sent_output_proj, 1).squeeze(1)
# if int(time.time()) % 100 == 0:
# print('alphas', torch.cat((alphas1.data[0, :, 0],
# alphas2.data[0, :, 0],
# torch.abs(alphas1.data[0, :, 0] -
# alphas2.data[0, :, 0])), 1))
emb = torch.cat((emb1, emb2, emb3, emb4), 1)
return emb, sent_output
"""
Inner attention from Yang et al.
"""
class InnerAttentionYANGEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionYANGEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.proj_query = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.proj_enc = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.query_embedding = nn.Embedding(1, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_keys = self.proj_enc(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(bsize,
-1,
2 * self.enc_lstm_dim)
sent_max = torch.max(sent_output, 1)[0].squeeze(1) # (bsize, 2*nhid)
sent_summary = self.proj_query(sent_max).unsqueeze(1).expand_as(sent_keys)
# (bsize, seqlen, 2*nhid)
sent_M = torch.tanh(sent_keys + sent_summary)
# (bsize, seqlen, 2*nhid) YANG : M = tanh(Wh_i + Wh_avg
sent_w = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(2)
# (bsize, 2*nhid, 1)
sent_alphas = self.softmax(sent_M.bmm(sent_w).squeeze(2)).unsqueeze(1)
# (bsize, 1, seqlen)
# if int(time.time()) % 200 == 0:
# print('w', torch.max(sent_w[0]), torch.min(sent_w[0]))
# print('alphas', sent_alphas[0][0][0:sent_len[0]])
# # Get attention vector
emb = sent_alphas.bmm(sent_output_proj).squeeze(1)
return emb, sent_output
"""
Hierarchical ConvNet
"""
class ConvNetEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(ConvNetEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.convnet1 = nn.Sequential(
nn.Conv1d(self.word_emb_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet2 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet3 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet4 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent = sent.transpose(0, 1).transpose(1, 2).contiguous()
# batch, nhid, seqlen)
sent = self.convnet1(sent)
u1 = torch.max(sent, 2)[0]
sent = self.convnet2(sent)
u2 = torch.max(sent, 2)[0]
sent = self.convnet3(sent)
u3 = torch.max(sent, 2)[0]
sent = self.convnet4(sent)
u4 = torch.max(sent, 2)[0]
emb = torch.cat((u1, u2, u3, u4), 1)
return emb, sent.permute(0, 2, 1)
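# Shape sketch for the hierarchical ConvNet above (toy sizes, assumed config): each of the
# four blocks contributes a max-over-time vector, so the embedding is their concatenation.
#
#   cfg = {'bsize': 4, 'word_emb_dim': 300, 'enc_lstm_dim': 256, 'pool_type': 'max'}
#   enc = ConvNetEncoder(cfg)
#   emb, feats = enc((torch.randn(10, 4, 300), torch.tensor([10, 9, 7, 5])))
#   # emb.shape == (4, 4 * 2 * 256)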
"""
BiLSTM
"""
class BiLSTM(nn.Module):
def __init__(self, config, device='cuda'):
super(BiLSTM, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 2,
bidirectional=True, dropout=self.dpout_model)
self.relu = nn.ReLU()
self.projection = nn.Linear(self.word_emb_dim, self.enc_lstm_dim)
self.device = device
def forward(self, sent_tuple):
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
bsize = sent.size(1)
sent_proj = self.relu(self.projection(sent))
out, (emb_ht, _) = self.enc_lstm(sent_proj)
emb = emb_ht[-2:].transpose(0, 1).contiguous().view(bsize, -1)
return emb, out
"""
Main module for Natural Language Inference
"""
class NLINet(nn.Module):
def __init__(self, config, weights=None, device='cuda'):
super(NLINet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.embedding = nn.Embedding(config['n_words'], config['word_emb_dim'])
        if weights is not None:  # guard: with the default weights=None, load_state_dict would fail
            self.embedding.load_state_dict({'weight': weights})
        self.embedding.weight.requires_grad = False
self.encoder = eval(self.encoder_type)(config, device=device)
self.inputdim = 4 * 2 * self.enc_lstm_dim
self.inputdim = 4 * self.inputdim if self.encoder_type in \
["ConvNetEncoder",
"InnerAttentionMILAEncoder"] else self.inputdim
self.inputdim = self.inputdim / 2 if self.encoder_type == "LSTMEncoder" \
else self.inputdim
self.inputdim = int(self.inputdim)
self.lin1 = nn.Linear(self.inputdim, self.fc_dim)
self.lin2 = nn.Linear(self.fc_dim, self.fc_dim)
self.lin3 = nn.Linear(self.fc_dim, self.n_classes)
for lin in [self.lin1, self.lin2, self.lin3]:
nn.init.xavier_uniform_(lin.weight)
nn.init.zeros_(lin.bias)
if self.nonlinear_fc:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.inputdim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
self.lin1,
nn.ReLU(),
nn.Dropout(p=self.dpout_fc),
self.lin2,
nn.ReLU(),
nn.Dropout(p=self.dpout_fc),
self.lin3
)
def forward(self, s1, s2):
# s1 : (s1, s1_len)
s1_embed = self.embedding(s1[0])
s2_embed = self.embedding(s2[0])
u, s1_out = self.encoder((s1_embed, s1[1]))
v, s2_out = self.encoder((s2_embed, s2[1]))
features = torch.cat((u, v, torch.abs(u - v), u * v), 1)
output = self.classifier(features)
return output, (s1_out, s2_out)
def encode(self, s1, is_probe=False):
# s1 : (s1, s1_len)
s1_embed = self.embedding(s1[0])
emb, out = self.encoder((s1_embed, s1[1]))
return emb, out
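# The classifier input assembled in forward() is the usual InferSent-style feature vector
# [u; v; |u - v|; u * v]. A standalone sketch of just that step, with u and v standing in
# for the two sentence encodings (sizes are assumptions):
#
#   u = torch.randn(4, 2048)
#   v = torch.randn(4, 2048)
#   features = torch.cat((u, v, torch.abs(u - v), u * v), 1)   # (4, 4 * 2048)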
"""
Main module for Classification
"""
class ClassificationNet(nn.Module):
def __init__(self, config):
super(ClassificationNet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.encoder = eval(self.encoder_type)(config)
self.inputdim = 2 * self.enc_lstm_dim
self.inputdim = 4 * self.inputdim if self.encoder_type == "ConvNetEncoder" else self.inputdim
self.inputdim = self.enc_lstm_dim if self.encoder_type == "LSTMEncoder" else self.inputdim
self.classifier = nn.Sequential(
nn.Linear(self.inputdim, 512),
nn.Linear(512, self.n_classes),
)
def forward(self, s1):
# s1 : (s1, s1_len)
        u, _ = self.encoder(s1)  # encoders return (embedding, per-token outputs); only the embedding is classified
output = self.classifier(u)
return output
def encode(self, s1):
emb, output = self.encoder(s1)
return emb, output
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.LongTensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.sum",
"torch.nn.Softmax",
"torch.nn.Conv1d",
"torch.FloatTensor",
"torch.abs",
"torch.nn.init.zeros_",
"torch.max",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.sort",
"torch.nn.Dropout",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.tanh",
"torch.nn.Embedding"
] | 1.7.1 | chandar-lab/CriticalGradientOptimization | 1af4b1df40489991289bb50bb69859a00b2c97c6 |
0.1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the rhythmic dynamic movement primitive.
"""
import numpy as np
import torch
from pyrobolearn.models.dmp.dmpytorch.canonical_systems import RhythmicCS
from pyrobolearn.models.dmp.dmpytorch.forcing_terms import RhythmicForcingTerm
from pyrobolearn.models.dmp.dmpytorch.dmp import DMP
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "[email protected]"
__status__ = "Development"
class RhythmicDMP(DMP):
r"""Rhythmic Dynamic Movement Primitive
Rhythmic DMPs have the same mathematical formulation as general DMPs, which is given by:
.. math:: \tau^2 \ddot{y} = K (g - y) - D \tau \dot{y} + f(s)
    where :math:`\tau` is a scaling factor that allows slowing down or speeding up the reproduced movement, :math:`K`
is the stiffness coefficient, :math:`D` is the damping coefficient, :math:`y, \dot{y}, \ddot{y}` are the position,
velocity, and acceleration of a DoF, and :math:`f(s)` is the non-linear forcing term.
However, the forcing term in the case of rhythmic DMPs is given by:
.. math:: f(s) = \frac{\sum_i \psi_i(s) w_i}{\sum_i \psi_i(s)} a
where :math:`w` are the learnable weight parameters, and :math:`\psi` are the basis functions evaluated at the
given input phase variable :math:`s`, and :math:`a` is the amplitude.
The basis functions (in the rhythmic case) are given by:
.. math:: \psi_i(s) = \exp \left( - h_i (\cos(s - c_i) - 1) \right)
where :math:`c_i` is the center of the basis, and :math:`h_i` is a measure of concentration.
Also, the canonical system associated with this transformation system is given by:
.. math:: \tau \dot{s} = 1
    where :math:`\tau` is a scaling factor that allows slowing down or speeding up the movement, and :math:`s` is the
phase variable that drives the DMP.
All these differential equations are solved using Euler's method.
References:
[1] "Dynamical movement primitives: Learning attractor models for motor behaviors", Ijspeert et al., 2013
"""
def __init__(self, num_dmps, num_basis, dt=0.01, y0=0, goal=1,
forces=None, stiffness=None, damping=None):
"""Initialize the rhythmic DMP
Args:
num_dmps (int): number of DMPs
num_basis (int): number of basis functions
dt (float): step integration for Euler's method
y0 (float, np.array): initial position(s)
goal (float, np.array): goal(s)
forces (list, ForcingTerm): the forcing terms (which can have different basis functions)
stiffness (float): stiffness coefficient
damping (float): damping coefficient
"""
# create rhythmic canonical system
cs = RhythmicCS(dt=dt)
# create forcing terms (each one contains the basis functions and learnable weights)
if forces is None:
if isinstance(num_basis, int):
forces = [RhythmicForcingTerm(cs, num_basis) for _ in range(num_dmps)]
else:
if not isinstance(num_basis, (np.ndarray, list, tuple, set)):
raise TypeError("Expecting 'num_basis' to be an int, list, tuple, np.array or set.")
if len(num_basis) != num_dmps:
raise ValueError("The length of th list of number of basis doesn't match the number of DMPs")
forces = [RhythmicForcingTerm(cs, n_basis) for n_basis in num_basis]
# call super class constructor
super(RhythmicDMP, self).__init__(canonical_system=cs, forces=forces, y0=y0, goal=goal,
stiffness=stiffness, damping=damping)
def get_scaling_term(self, new_goal=None):
"""
Return the scaling term for the forcing term. For rhythmic DMPs it's non-diminishing, so this function just
returns 1.
"""
return torch.ones(self.num_dmps)
def _generate_goal(self, y_des):
"""Generate the goal for path imitation.
For rhythmic DMPs, the goal is the average of the desired trajectory.
Args:
y_des (float[M,T]): the desired trajectory to follow (with shape [num_dmps, timesteps])
Returns:
float[M]: goal positions (one for each DMP)
"""
goal = np.zeros(self.num_dmps)
for n in range(self.num_dmps):
num_idx = ~torch.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = .5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
return goal
| [
"torch.isnan",
"torch.ones"
] | 0.1.0 | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 |
0.1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Defines the common loss functions that are used by the learning algorithm / optimizer.
Losses are evaluated on model parameters, data batches / storages, or transitions tuples.
"""
import torch
from pyrobolearn.losses.loss import Loss
from pyrobolearn.storages import Batch
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "[email protected]"
__status__ = "Development"
class BatchLoss(Loss):
r"""Loss evaluated on a batch.
"""
def __init__(self):
super(BatchLoss, self).__init__()
# cache the last value that was computed by the loss
self.value = None
def _compute(self, batch):
"""Compute the loss on the given batch. This method has to be implemented in the child classes."""
raise NotImplementedError
def compute(self, batch):
"""
Compute the loss on the given batch.
Args:
batch (Batch): batch to evaluate the loss on.
Returns:
torch.Tensor: scalar loss value.
"""
# check that we are given a batch
if not isinstance(batch, Batch):
raise TypeError("Expecting the given 'batch' to be an instance of `Batch`, instead got: "
"{}".format(type(batch)))
self.value = self._compute(batch)
return self.value
class FixedLoss(BatchLoss):
r"""Fixed Loss
    This is a dummy loss that always returns the same value it was given at initialization.
"""
def __init__(self, value):
"""
Initialize the fixed loss.
Args:
value (torch.Tensor, float, int, np.array, list): fixed initial values that will be returned at each call.
"""
super(FixedLoss, self).__init__()
self.value = torch.tensor(value)
def _compute(self, batch):
"""
Compute the fixed loss.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
return self.value
class L2Loss(BatchLoss):
r"""L2 Loss
Compute the L2 loss given by: :math:`1/2 * (y_{target} - y_{predict})^2`
"""
def __init__(self, target, predictor):
"""
Initialize the L2 loss.
Args:
            target (callable): callable target that accepts a Batch instance as input. If the target is not already
                stored in the given batch, the batch is passed to it to compute the target values.
            predictor (callable): callable predictor that accepts a Batch instance as input. If the prediction is not
                already stored in the given batch, the batch is passed to it to compute the predictions.
"""
super(L2Loss, self).__init__()
self._target = target
self._predictor = predictor
def _compute(self, batch):
r"""
Compute the L2 loss: :math:`1/2 * (y_{target} - y_{predict})^2`.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
# get target data
if self._target in batch.current:
target = batch.current[self._target]
elif self._target in batch:
target = batch[self._target]
else:
target = self._target(batch)
# get predicted data
if self._predictor in batch.current:
output = batch.current[self._predictor]
elif self._predictor in batch:
output = batch[self._predictor]
else:
output = self._predictor(batch)
# compute L2 loss
return 0.5 * (target - output).pow(2).mean()
class HuberLoss(BatchLoss):
r"""Huber Loss
"In statistics, the Huber loss is a loss function used in robust regression, that is less sensitive to outliers
in data than the squared error loss." [1]
This loss is given by [1]:
    .. math:: {\mathcal L}_{\delta}(a) = \left\{ \begin{array}{lc} 1/2 a^2 & \mbox{for } |a| \leq \delta, \\
        \delta (|a| - 1/2 \delta) & \mbox{otherwise} \end{array} \right.
"This function is quadratic for small values of :math:`a`, and linear for large values, with equal values and
slopes of the different sections at the two points where :math:`|a| = \delta`" [1].
In [2], this loss is used for DQN, where :math:`\delta=1`, and :math:`a` is the temporal difference error, that is,
:math:`a = Q(s,a) - (r + \gamma \max_a Q(s',a))` where :math:`(r + \gamma \max_a Q(s',a))` is the target function.
References:
- [1] Huber Loss (on Wikipedia): https://en.wikipedia.org/wiki/Huber_loss
- [2] "Reinforcement Learning (DQN) Tutorial":
https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
"""
def __init__(self, loss, delta=1.):
"""
Initialize the Huber loss.
Args:
loss (Loss): initial loss to smooth.
delta (float): coefficient
"""
super(HuberLoss, self).__init__()
self.loss = loss
self.delta = delta
def _compute(self, batch):
r"""
Compute the Huber loss.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
a = self.loss(batch)
if abs(a) <= self.delta:
return 0.5 * torch.pow(a, 2)
return self.delta * (torch.abs(a) - 0.5 * self.delta)
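# For reference, the piecewise rule above written element-wise with torch.where; this is
# only an illustration of the formula (the class itself receives a scalar from the wrapped
# loss), and the helper name is made up for the sketch:
#
#   def huber_elementwise(a, delta=1.0):
#       quadratic = 0.5 * a.pow(2)                  # |a| <= delta branch
#       linear = delta * (a.abs() - 0.5 * delta)    # |a| > delta branch
#       return torch.where(a.abs() <= delta, quadratic, linear)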
class PseudoHuberLoss(BatchLoss):
r"""Pseudo-Huber Loss
"The Pseudo-Huber loss function can be used as a smooth approximation of the Huber loss function. It combines the
best properties of L2 squared loss and L1 absolute loss by being strongly convex when close to the target/minimum
and less steep for extreme values. This steepness can be controlled by the :math:`\delta` value. The Pseudo-Huber
loss function ensures that derivatives are continuous for all degrees. It is defined as:
.. math:: {\mathcal L}_{\delta}(a) = \delta^2 \left( \sqrt{1 + (a/\delta)^2} - 1 \right)
As such, this function approximates :math:`a^2/2` for small values of :math:`a`, and approximates a straight line
with slope :math:`\delta` for large values of :math:`a`.
While the above is the most common form, other smooth approximations of the Huber loss function also exist." [1]
References:
- [1] Huber Loss (on Wikipedia): https://en.wikipedia.org/wiki/Huber_loss#Pseudo-Huber_loss_function
"""
def __init__(self, loss, delta=1.):
"""
Initialize the Pseudo-Huber loss.
Args:
loss (Loss): initial loss to smooth.
delta (float): steepness coefficient
"""
super(PseudoHuberLoss, self).__init__()
self.loss = loss
self.delta = delta
def _compute(self, batch):
r"""
Compute the pseudo Huber loss.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
a = self.loss(batch)
return self.delta**2 * (torch.sqrt(1 + (a/self.delta)**2) - 1)
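# Quick numerical check of the behaviour described in the docstring (sample points are
# arbitrary): for small a the pseudo-Huber value is close to a^2 / 2, while for large a it
# grows roughly linearly with slope delta.
#
#   a, delta = torch.tensor([0.01, 10.0]), 1.0
#   delta ** 2 * (torch.sqrt(1 + (a / delta) ** 2) - 1)   # ~[5.0e-05, 9.05]; a**2 / 2 = [5.0e-05, 50.0]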
class KLLoss(BatchLoss):
r"""KL Penalty Loss
KL Penalty to minimize:
.. math:: L^{KL}(\theta) = \mathbb{E}[ KL( p || q) ]
where :math:`KL(.||.)` is the KL-divergence between two probability distributions.
"""
def __init__(self, p, q):
"""
Initialize the KL Penalty loss.
Args:
p (torch.distributions.Distribution): 1st distribution
q (torch.distributions.Distribution): 2nd distribution
"""
super(KLLoss, self).__init__()
self.p = p
self.q = q
def _compute(self, batch):
"""
Compute :math:`KL(p||q)`.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
# TODO use the batch
return torch.distributions.kl.kl_divergence(self.p, self.q)
def latex(self):
"""Return a latex formula of the loss."""
return r"\mathbb{E}[ KL( p || q ) ]"
# class ForwardKLPenaltyLoss(KLPenaltyLoss):
# r"""Forward KL Penalty Loss"""
# pass
#
#
# class ReverseKLPenaltyLoss(KLPenaltyLoss):
# r"""Reverse KL Penalty Loss"""
# pass
class HLoss(BatchLoss):
r"""Entropy Loss
Entropy loss of a distribution:
.. math:: L^{Entropy}(\theta) = H[ p ]
where :math:`H[.]` is the Shannon entropy of the given probability distribution.
"""
def __init__(self, distribution):
"""
Initialize the entropy loss.
Args:
distribution (torch.distributions.Distribution): probability distribution.
"""
super(HLoss, self).__init__()
if not isinstance(distribution, torch.distributions.Distribution):
raise TypeError("Expecting the given distribution to be an instance of `torch.distributions.Distribution`, "
"instead got: {}".format(type(distribution)))
self.p = distribution
def _compute(self, batch):
"""
Compute the entropy loss.
Args:
batch (Batch): batch containing the states, actions, rewards, etc.
Returns:
torch.Tensor: loss scalar value
"""
entropy = self.p.entropy().mean()
return entropy
# Tests
if __name__ == '__main__':
# compute the losses
loss = - FixedLoss(3) # + FixedLoss(2)
print(loss(2))
| [
"torch.sqrt",
"torch.abs",
"torch.distributions.kl.kl_divergence",
"torch.tensor",
"torch.pow"
] | 0.1.0 | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 |
1.7 | import datetime
import logging
import math
import time
import torch
from os import path as osp
from basicsr.data import build_dataloader, build_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import build_model
from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str,
init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir)
from basicsr.utils.options import copy_opt_file, dict2str, parse_options
def init_tb_loggers(opt):
# initialize wandb logger before tensorboard logger to allow proper sync
if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project')
is not None) and ('debug' not in opt['name']):
assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb')
init_wandb_logger(opt)
tb_logger = None
if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
tb_logger = init_tb_logger(log_dir=osp.join(opt['root_path'], 'tb_logger', opt['name']))
return tb_logger
def create_train_val_dataloader(opt, logger):
# create train and val dataloaders
train_loader, val_loader = None, None
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
train_set = build_dataset(dataset_opt)
train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio)
train_loader = build_dataloader(
train_set,
dataset_opt,
num_gpu=opt['num_gpu'],
dist=opt['dist'],
sampler=train_sampler,
seed=opt['manual_seed'])
num_iter_per_epoch = math.ceil(
len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
total_iters = int(opt['train']['total_iter'])
total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
logger.info('Training statistics:'
f'\n\tNumber of train images: {len(train_set)}'
f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
f'\n\tWorld size (gpu number): {opt["world_size"]}'
f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
elif phase == 'val':
val_set = build_dataset(dataset_opt)
val_loader = build_dataloader(
val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
logger.info(f'Number of val images/folders in {dataset_opt["name"]}: {len(val_set)}')
else:
raise ValueError(f'Dataset phase {phase} is not recognized.')
return train_loader, train_sampler, val_loader, total_epochs, total_iters
def load_resume_state(opt):
resume_state_path = None
if opt['auto_resume']:
state_path = osp.join('experiments', opt['name'], 'training_states')
if osp.isdir(state_path):
states = list(scandir(state_path, suffix='state', recursive=False, full_path=False))
if len(states) != 0:
states = [float(v.split('.state')[0]) for v in states]
resume_state_path = osp.join(state_path, f'{max(states):.0f}.state')
opt['path']['resume_state'] = resume_state_path
else:
if opt['path'].get('resume_state'):
resume_state_path = opt['path']['resume_state']
if resume_state_path is None:
resume_state = None
else:
device_id = torch.cuda.current_device()
resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id))
check_resume(opt, resume_state['iter'])
return resume_state
def train_pipeline(root_path):
# parse options, set distributed setting, set ramdom seed
opt, args = parse_options(root_path, is_train=True)
opt['root_path'] = root_path
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# load resume states if necessary
resume_state = load_resume_state(opt)
# mkdir for experiments and logger
if resume_state is None:
make_exp_dirs(opt)
if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0:
mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name']))
# copy the yml file to the experiment root
copy_opt_file(args.opt, opt['path']['experiments_root'])
# WARNING: should not use get_root_logger in the above codes, including the called functions
# Otherwise the logger will not be properly initialized
log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
logger.info(get_env_info())
logger.info(dict2str(opt))
# initialize wandb and tb loggers
tb_logger = init_tb_loggers(opt)
# create train and validation dataloaders
result = create_train_val_dataloader(opt, logger)
train_loader, train_sampler, val_loader, total_epochs, total_iters = result
# create model
model = build_model(opt)
if resume_state: # resume training
model.resume_training(resume_state) # handle optimizers and schedulers
logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.")
start_epoch = resume_state['epoch']
current_iter = resume_state['iter']
else:
start_epoch = 0
current_iter = 0
# create message logger (formatted outputs)
msg_logger = MessageLogger(opt, current_iter, tb_logger)
# dataloader prefetcher
prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
if prefetch_mode is None or prefetch_mode == 'cpu':
prefetcher = CPUPrefetcher(train_loader)
elif prefetch_mode == 'cuda':
prefetcher = CUDAPrefetcher(train_loader, opt)
logger.info(f'Use {prefetch_mode} prefetch dataloader')
if opt['datasets']['train'].get('pin_memory') is not True:
raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
else:
        raise ValueError(f"Wrong prefetch_mode {prefetch_mode}. Supported ones are: None, 'cuda', 'cpu'.")
# training
logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
data_timer, iter_timer = AvgTimer(), AvgTimer()
start_time = time.time()
for epoch in range(start_epoch, total_epochs + 1):
train_sampler.set_epoch(epoch)
prefetcher.reset()
train_data = prefetcher.next()
while train_data is not None:
data_timer.record()
current_iter += 1
if current_iter > total_iters:
break
# update learning rate
model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
# training
model.feed_data(train_data)
model.optimize_parameters(current_iter)
iter_timer.record()
if current_iter == 1:
# reset start time in msg_logger for more accurate eta_time
# not work in resume mode
msg_logger.reset_start_time()
# log
if current_iter % opt['logger']['print_freq'] == 0:
log_vars = {'epoch': epoch, 'iter': current_iter}
log_vars.update({'lrs': model.get_current_learning_rate()})
log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()})
log_vars.update(model.get_current_log())
msg_logger(log_vars)
# save models and training states
if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
logger.info('Saving models and training states.')
model.save(epoch, current_iter)
# validation
if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0):
model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
data_timer.start()
iter_timer.start()
train_data = prefetcher.next()
# end of iter
# end of epoch
consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
logger.info(f'End of training. Time consumed: {consumed_time}')
logger.info('Save the latest model.')
model.save(epoch=-1, current_iter=-1) # -1 stands for the latest
if opt.get('val') is not None:
model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
if tb_logger:
tb_logger.close()
if __name__ == '__main__':
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_pipeline(root_path)
| [
"torch.cuda.current_device"
] | 1.7 | Zeo95/BasicSR | 0e55b20c2a88428961eceb28dd87558b038c4322 |
1.11 | #
# Created on March 2022
#
# Copyright (c) 2022 Meitar Ronen
#
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_Classifier(nn.Module):
def __init__(self, hparams, codes_dim=320, k=None, weights_fc1=None, weights_fc2=None, bias_fc1=None, bias_fc2=None,):
super(MLP_Classifier, self).__init__()
if k is None:
self.k = hparams.init_k
else:
self.k = k
self.codes_dim = codes_dim
self.hidden_dims = hparams.clusternet_hidden_layer_list
self.last_dim = self.hidden_dims[-1]
self.class_fc1 = nn.Linear(self.codes_dim, self.hidden_dims[0])
hidden_modules = []
for i in range(len(self.hidden_dims) - 1):
hidden_modules.append(nn.Linear(self.hidden_dims[i], self.hidden_dims[i+1]))
hidden_modules.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_modules)
self.class_fc2 = nn.Linear(self.hidden_dims[-1], self.k)
print(self.hidden_layers)
if weights_fc1 is not None:
self.class_fc1.weight.data = weights_fc1
if weights_fc2 is not None:
self.class_fc2.weight.data = weights_fc2
if bias_fc1 is not None:
self.class_fc1.bias.data = bias_fc1
if bias_fc2 is not None:
self.class_fc2.bias.data = bias_fc2
        self.softmax_norm = hparams.softmax_norm
        # needed by the "random" init branches below, which create new tensors on self.device
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
def _check_nan(self, x, num):
if torch.isnan(x).any():
print(f"forward {num}")
if torch.isnan(self.class_fc1.weight.data).any():
print("fc1 weights contain nan")
elif torch.isnan(self.class_fc1.bias.data).any():
print("fc1 bias contain nan")
elif torch.isnan(self.class_fc2.weight.data).any():
print("fc2 weights contain nan")
elif torch.isnan(self.class_fc2.bias.data).any():
print("fc2 bias contain nan")
else:
print("no weights are nan!")
def forward(self, x):
x = x.view(-1, self.codes_dim)
x = F.relu(self.class_fc1(x))
x = self.hidden_layers(x)
x = self.class_fc2(x)
x = torch.mul(x, self.softmax_norm)
return F.softmax(x, dim=1)
def update_K_split(self, split_decisions, init_new_weights="same", subclusters_nets=None):
# split_decisions is a list of K booleans indicating whether to split a cluster or not
# update the classifier to have K' more classes, where K' is the number of clusters that should be split
        # deleting the old clusters and adding the new ones to the end (weights)
class_fc2 = self.class_fc2
mus_ind_to_split = torch.nonzero(split_decisions, as_tuple=False)
self.k += len(mus_ind_to_split)
with torch.no_grad():
self.class_fc2 = nn.Linear(self.last_dim, self.k)
# Adjust weights
weights_not_split = class_fc2.weight.data[torch.logical_not(split_decisions.bool()), :]
weights_splits = class_fc2.weight.data[split_decisions.bool(), :]
new_weights = self._initalize_weights_split(
weights_splits, split_decisions, init_new_weight=init_new_weights, subclusters_nets=subclusters_nets
)
self.class_fc2.weight.data = torch.cat(
[weights_not_split, new_weights]
)
# Adjust bias
bias_not_split = class_fc2.bias.data[torch.logical_not(split_decisions.bool())]
bias_split = class_fc2.bias.data[split_decisions.bool()]
new_bias = self._initalize_bias_split(bias_split, split_decisions, init_new_weight=init_new_weights, subclusters_nets=subclusters_nets)
self.class_fc2.bias.data = torch.cat([bias_not_split, new_bias])
def update_K_merge(self, merge_decisions, pairs_to_merge, highest_ll, init_new_weights="same"):
""" Update the clustering net after a merge decision was made
Args:
            merge_decisions (torch.tensor): a list of K booleans indicating whether a cluster should be merged or not
pairs_to_merge ([type]): a list of lists, which list contains the indices of two clusters to merge
init_new_weights (str, optional): How to initialize the weights of the new weights of the merged cluster. Defaults to "same".
"same" uses the weights of the cluster with the highest loglikelihood, "random" uses random weights.
highest_ll ([type]): a list of the indices of the clusters with the highest log likelihood for each pair.
Description:
We will delete the weights of the two merged clusters, and append (to the end) the weights of the newly merged clusters
"""
self.k -= len(highest_ll)
with torch.no_grad():
class_fc2 = nn.Linear(self.last_dim, self.k)
# Adjust weights
weights_not_merged = self.class_fc2.weight.data[torch.logical_not(merge_decisions), :]
weights_merged = self.class_fc2.weight.data[merge_decisions, :]
new_weights = self._initalize_weights_merge(
weights_merged, merge_decisions, highest_ll, init_new_weight=init_new_weights
)
class_fc2.weight.data = torch.cat(
[weights_not_merged, new_weights]
)
# Adjust bias
bias_not_merged = self.class_fc2.bias.data[torch.logical_not(merge_decisions)]
bias_merged = self.class_fc2.bias.data[merge_decisions]
new_bias = self._initalize_bias_merge(bias_merged, merge_decisions, highest_ll, init_new_weight=init_new_weights)
class_fc2.bias.data = torch.cat([bias_not_merged, new_bias])
self.class_fc2 = class_fc2
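    # Bookkeeping sketch for the two update methods above (toy values, no real weights):
    # each split replaces one output unit by two, and each merged pair collapses into one,
    # so the class count evolves as k -> k + #splits and k -> k - #merged_pairs.
    #
    #   split_decisions = torch.tensor([True, False, True])   # K = 3 -> K = 5
    #   k_after_split = 3 + int(split_decisions.sum())
    #   pairs_to_merge = [[0, 1]]                              # K = 5 -> K = 4
    #   k_after_merge = k_after_split - len(pairs_to_merge)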
def _initalize_weights_split(self, weight, split_decisions, init_new_weight, subclusters_nets=None):
if init_new_weight == "same":
# just duplicate, can think of something more complex later
return weight.repeat(1, 2).view(-1, self.last_dim)
elif init_new_weight == "random":
return torch.FloatTensor(weight.shape[0]*2, weight.shape[1]).uniform_(-1., 1).to(device=self.device)
elif init_new_weight == "subclusters":
new_weights = []
for k, split in enumerate(split_decisions):
if split:
new_weights.append(subclusters_nets.class_fc2.weight.data[2 * k: 2*(k + 1), self.last_dim * k: self.last_dim * (k+1)].clone())
return torch.cat(new_weights)
else:
raise NotImplementedError
def _initalize_weights_merge(self, weights_merged, merge_decisions, highest_ll, init_new_weight="same"):
if init_new_weight == "same":
# Take the weights of the cluster with highest likelihood
ll = [i[0].item() for i in highest_ll]
return self.class_fc2.weight.data[ll, :]
elif init_new_weight == "random":
return torch.FloatTensor(len(highest_ll), weights_merged.shape[1]).uniform_(-1., 1).to(device=self.device)
elif init_new_weight == "average":
raise NotImplementedError()
else:
raise NotImplementedError
def _initalize_bias_split(self, bias_split, split_decisions, init_new_weight, subclusters_nets=None):
if init_new_weight == "same":
# just duplicate
return bias_split.repeat_interleave(2)
elif init_new_weight == "random":
return torch.zeros(bias_split.shape[0]*2).to(device=self.device)
elif init_new_weight == "subclusters":
new_bias = []
for k, split in enumerate(split_decisions):
if split:
new_bias.append(subclusters_nets.class_fc2.bias.data[2 * k: 2*(k + 1)].clone())
return torch.cat(new_bias)
else:
raise NotImplementedError
def _initalize_bias_merge(self, bias_merged, merge_decisions, highest_ll, init_new_weight="same"):
if init_new_weight == "same":
# take the biases of the highest likelihood
ll = [i[0].item() for i in highest_ll]
return self.class_fc2.bias.data[ll]
elif init_new_weight == "random":
return torch.zeros(len(highest_ll)).to(device=self.device)
elif init_new_weight == "average":
raise NotImplementedError
else:
raise NotImplementedError
class Subclustering_net_duplicating(nn.Module):
def __init__(self, hparams, codes_dim=320, k=None):
        super(Subclustering_net_duplicating, self).__init__()
if k is None:
self.K = hparams.init_k
else:
self.K = k
self.codes_dim = codes_dim
self.hparams = hparams
self.hidden_dim = 50
self.softmax_norm = self.hparams.subcluster_softmax_norm
# the subclustering net will be a stacked version of the clustering net
self.class_fc1 = nn.Linear(self.codes_dim * self.K, self.hidden_dim * self.K)
self.class_fc2 = nn.Linear(self.hidden_dim * self.K, 2 * self.K)
gradient_mask_fc1 = torch.ones(self.codes_dim * self.K, self.hidden_dim * self.K)
gradient_mask_fc2 = torch.ones(self.hidden_dim * self.K, 2 * self.K)
# detach different subclustering nets - zeroing out the weights connecting between different subnets
# and also zero their gradient
for k in range(self.K):
# row are the output neurons and columns are of the input ones
# before
self.class_fc1.weight.data[self.hidden_dim * k: self.hidden_dim * (k + 1), :self.codes_dim * k] = 0
gradient_mask_fc1[self.hidden_dim * k: self.hidden_dim * (k + 1), :self.codes_dim * k] = 0
self.class_fc2.weight.data[2 * k: 2 * (k + 1), :self.hidden_dim * k] = 0
gradient_mask_fc2[2 * k: 2 * (k + 1), :self.hidden_dim * k] = 0
# after
self.class_fc1.weight.data[self.hidden_dim * k: self.hidden_dim * (k + 1), :self.codes_dim * (k + 1)] = 0
gradient_mask_fc1[self.hidden_dim * k: self.hidden_dim * (k + 1), :self.codes_dim * (k + 1)] = 0
self.class_fc2.weight.data[2 * k: 2 * (k + 1), :self.hidden_dim * (k + 1)] = 0
gradient_mask_fc2[2 * k: 2 * (k + 1), :self.hidden_dim * (k + 1)] = 0
self.class_fc1.weight.register_hook(lambda grad: grad.mul_(gradient_mask_fc1))
self.class_fc2.weight.register_hook(lambda grad: grad.mul_(gradient_mask_fc2))
# weights are zero and their grad will always be 0 so won't change
def forward(self, X, hard_assign):
X = self.reshape_input(X, hard_assign)
X = F.relu(self.class_fc1(X))
X = self.class_fc2(X)
X = torch.mul(X, self.softmax_norm)
return F.softmax(X, dim=1)
def reshape_input(self, X, hard_assign):
# each input (batch_size X codes_dim) will be padded with zeros to insert to the stacked subnets
X = X.view(-1, self.codes_dim)
new_batch = torch.zeros(X.size(0), self.K, X.size(1))
for k in range(self.K):
new_batch[hard_assign == k, k, :] = X[hard_assign == k]
new_batch = new_batch.view(X.size(0), -1) # in s_batch X d * K
return new_batch
class Subclustering_net(nn.Module):
# Duplicate only inner layer
# SHAPE is input dim -> 50 * K -> 2 * K
def __init__(self, hparams, codes_dim=320, k=None):
super(Subclustering_net, self).__init__()
if k is None:
self.K = hparams.init_k
else:
self.K = k
self.codes_dim = codes_dim
self.hparams = hparams
self.hidden_dim = 50
self.softmax_norm = self.hparams.softmax_norm
self.device = "cuda" if torch.cuda.is_available() and hparams.gpus is not None else "cpu"
# the subclustering net will be a stacked version of the clustering net
self.class_fc1 = nn.Linear(self.codes_dim, self.hidden_dim * self.K)
self.class_fc2 = nn.Linear(self.hidden_dim * self.K, 2 * self.K)
gradient_mask_fc2 = torch.zeros(self.hidden_dim * self.K, 2 * self.K)
# detach different subclustering nets - zeroing out the weights connecting between different subnets
# and also zero their gradient
for k in range(self.K):
gradient_mask_fc2[self.hidden_dim * k:self.hidden_dim * (k + 1), 2 * k: 2 * (k + 1)] = 1
self.class_fc2.weight.data *= gradient_mask_fc2.T
self.class_fc2.weight.register_hook(lambda grad: grad.mul_(gradient_mask_fc2.T.to(device=self.device)))
# weights are zero and their grad will always be 0 so won't change
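    # Sketch of the block-diagonal mask built above (hidden_dim = 2 and K = 2 are toy
    # values): only the weights linking hidden block k to output pair k stay trainable,
    # so each cluster's sub-network remains independent of the others.
    #
    #   hidden_dim, K = 2, 2
    #   mask = torch.zeros(hidden_dim * K, 2 * K)
    #   for k in range(K):
    #       mask[hidden_dim * k:hidden_dim * (k + 1), 2 * k:2 * (k + 1)] = 1
    #   # mask.T multiplies both fc2.weight and its gradient, zeroing cross-cluster entries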
def forward(self, X):
# Note that there is no softmax here
X = F.relu(self.class_fc1(X))
X = self.class_fc2(X)
return X
def update_K_split(self, split_decisions, init_new_weights="same"):
# split_decisions is a list of K booleans indicating whether to split a cluster or not
# update the classifier to have K' more classes, where K' is the number of clusters that should be split
        # deleting the old clusters and adding the new ones to the end (weights)
class_fc1 = self.class_fc1
class_fc2 = self.class_fc2
mus_ind_to_split = torch.nonzero(split_decisions, as_tuple=False)
mus_ind_not_split = torch.nonzero(torch.logical_not(split_decisions), as_tuple=False)
self.K += len(mus_ind_to_split)
with torch.no_grad():
self.class_fc1 = nn.Linear(self.codes_dim, self.hidden_dim * self.K)
self.class_fc2 = nn.Linear(self.hidden_dim * self.K, 2 * self.K)
# Adjust weights
fc1_weights_not_split = class_fc1.weight.data[torch.logical_not(split_decisions.bool()).repeat_interleave(self.hidden_dim), :]
fc1_weights_split = class_fc1.weight.data[split_decisions.bool().repeat_interleave(self.hidden_dim), :]
fc1_new_weights = self._initalize_weights_split(
fc1_weights_split, init_new_weight=init_new_weights
)
self.class_fc1.weight.data = torch.cat(
[fc1_weights_not_split, fc1_new_weights]
)
self.class_fc2.weight.data.fill_(0)
gradient_mask_fc2 = torch.zeros(self.hidden_dim * self.K, 2 * self.K)
for i, k in enumerate(mus_ind_not_split):
# i is the new index of the cluster and k is the old one
self.class_fc2.weight.data[2 * i: 2*(i + 1), self.hidden_dim * i: self.hidden_dim * (i+1)] = class_fc2.weight.data[2 * k: 2*(k + 1), self.hidden_dim * k: self.hidden_dim * (k+1)]
gradient_mask_fc2[self.hidden_dim * i:self.hidden_dim * (i + 1), 2 * i: 2 * (i + 1)] = 1
for j, k in enumerate(mus_ind_to_split.repeat_interleave(2)):
# j + len(mus_ind_not_split) is the new index and k is the old one. We use interleave to create 2 new clusters for each split cluster
i = j + len(mus_ind_not_split)
weights = class_fc2.weight.data[2 * k: 2*(k + 1), self.hidden_dim * k: self.hidden_dim * (k+1)]
if init_new_weights != 'same':
weights = self._initalize_weights_split(weights, init_new_weights, num=1)
self.class_fc2.weight.data[2 * i: 2*(i + 1), self.hidden_dim * i: self.hidden_dim * (i+1)] = weights
gradient_mask_fc2[self.hidden_dim * i:self.hidden_dim * (i + 1), 2 * i: 2 * (i + 1)] = 1
self.class_fc2.weight.register_hook(lambda grad: grad.mul_(gradient_mask_fc2.T.to(device=self.device)))
# Adjust bias
fc1_bias_not_split = class_fc1.bias.data[torch.logical_not(split_decisions.bool()).repeat_interleave(self.hidden_dim)]
fc1_bias_split = class_fc1.bias.data[split_decisions.bool().repeat_interleave(self.hidden_dim)]
fc2_bias_not_split = class_fc2.bias.data[torch.logical_not(split_decisions.bool()).repeat_interleave(2)]
fc2_bias_split = class_fc2.bias.data[split_decisions.bool().repeat_interleave(2)]
fc1_new_bias = self._initalize_bias_split(fc1_bias_split, init_new_weight=init_new_weights)
fc2_new_bias = self._initalize_bias_split(fc2_bias_split, init_new_weight=init_new_weights)
self.class_fc1.bias.data = torch.cat([fc1_bias_not_split, fc1_new_bias])
self.class_fc2.bias.data = torch.cat([fc2_bias_not_split, fc2_new_bias])
self.class_fc1.to(device=self.device)
self.class_fc2.to(device=self.device)
del class_fc1, class_fc2
def update_K_merge(self, merge_decisions, pairs_to_merge, highest_ll, init_new_weights="highest_ll"):
""" Update the clustering net after a merge decision was made
Args:
            merge_decisions (torch.tensor): a list of K booleans indicating whether a cluster should be merged or not
pairs_to_merge ([type]): a list of lists, which list contains the indices of two clusters to merge
init_new_weights (str, optional): How to initialize the weights of the new weights of the merged cluster. Defaults to "same".
"same" uses the weights of the cluster with the highest loglikelihood, "random" uses random weights.
highest_ll ([type]): a list of the indices of the clusters with the highest log likelihood for each pair.
Description:
We will delete the weights of the two merged clusters, and append (to the end) the weights of the newly merged clusters
"""
class_fc1 = self.class_fc1
class_fc2 = self.class_fc2
mus_ind_not_merged = torch.nonzero(torch.logical_not(torch.tensor(merge_decisions)), as_tuple=False)
self.K -= len(highest_ll)
with torch.no_grad():
self.class_fc1 = nn.Linear(self.codes_dim, self.hidden_dim * self.K)
self.class_fc2 = nn.Linear(self.hidden_dim * self.K, 2 * self.K)
# Adjust weights
fc1_weights_not_merged = class_fc1.weight.data[torch.logical_not(torch.tensor(merge_decisions)).repeat_interleave(self.hidden_dim), :]
fc1_new_weights = []
fc1_new_bias = []
fc2_new_bias = []
for merge_pair, highest_ll_k in zip(pairs_to_merge, highest_ll):
fc1_weights_merged = [
class_fc1.weight.data[k * self.hidden_dim: (k + 1) * self.hidden_dim, :] for k in merge_pair]
fc1_new_weights.append(self._initalize_weights_merge(
fc1_weights_merged, (torch.tensor(highest_ll_k) == merge_pair[1]).item(), init_new_weight=init_new_weights
))
fc1_bias_merged = [
class_fc1.bias.data[k * self.hidden_dim: (k + 1) * self.hidden_dim] for k in merge_pair]
fc1_new_bias.append(self._initalize_weights_merge(
fc1_bias_merged, (torch.tensor(highest_ll_k) == merge_pair[1]).item(), init_new_weight=init_new_weights
))
fc1_new_weights = torch.cat(fc1_new_weights)
fc1_new_bias = torch.cat(fc1_new_bias)
self.class_fc1.weight.data = torch.cat(
[fc1_weights_not_merged, fc1_new_weights]
)
self.class_fc2.weight.data.fill_(0)
gradient_mask_fc2 = torch.zeros(self.hidden_dim * self.K, 2 * self.K)
for i, k in enumerate(mus_ind_not_merged):
# i is the new index of the cluster and k is the old one
self.class_fc2.weight.data[2 * i: 2*(i + 1), self.hidden_dim * i: self.hidden_dim * (i+1)] =\
class_fc2.weight.data[2 * k: 2*(k + 1), self.hidden_dim * k: self.hidden_dim * (k+1)]
gradient_mask_fc2[self.hidden_dim * i:self.hidden_dim * (i + 1), 2 * i: 2 * (i + 1)] = 1
for j, (merge_pair, highest_ll_k) in enumerate(zip(pairs_to_merge, highest_ll)):
# j + len(mus_ind_not_split) is the new index and k is the old one. We use interleave to create 2 new clusters for each split cluster
i = j + len(mus_ind_not_merged)
weights = [class_fc2.weight.data[2 * k: 2*(k + 1), self.hidden_dim * k: self.hidden_dim * (k+1)] for k in merge_pair]
weights = self._initalize_weights_merge(weights, (torch.tensor(highest_ll_k) == merge_pair[1]).item(), init_new_weights)
bias = [class_fc2.bias.data[2 * k: 2*(k + 1)] for k in merge_pair]
bias = self._initalize_weights_merge(bias, (torch.tensor(highest_ll_k) == merge_pair[1]).item(), init_new_weights)
fc2_new_bias.append(bias)
self.class_fc2.weight.data[2 * i: 2*(i + 1), self.hidden_dim * i: self.hidden_dim * (i+1)] = weights
gradient_mask_fc2[self.hidden_dim * i:self.hidden_dim * (i + 1), 2 * i: 2 * (i + 1)] = 1
self.class_fc2.weight.register_hook(lambda grad: grad.mul_(gradient_mask_fc2.T.to(device=self.device)))
fc2_new_bias = torch.cat(fc2_new_bias)
# Adjust bias
fc1_bias_not_merged = class_fc1.bias.data[torch.logical_not(merge_decisions).repeat_interleave(self.hidden_dim)]
fc2_bias_not_merged = class_fc2.bias.data[torch.logical_not(merge_decisions).repeat_interleave(2)]
self.class_fc1.bias.data = torch.cat([fc1_bias_not_merged, fc1_new_bias])
self.class_fc2.bias.data = torch.cat([fc2_bias_not_merged, fc2_new_bias])
self.class_fc1.to(device=self.device)
self.class_fc2.to(device=self.device)
del class_fc1, class_fc2
def _initalize_weights_split(self, weight, init_new_weight, num=2):
if init_new_weight == "same":
# just duplicate
dup = weight.reshape(-1, self.hidden_dim, self.codes_dim).repeat_interleave(num, 0)
return torch.cat([dup[i] for i in range(dup.size(0))])
elif init_new_weight == "same_w_noise":
# just duplicate
dup = weight.reshape(-1, weight.size(0), weight.size(1)).repeat_interleave(num, 0)
return torch.cat([dup[i] + torch.FloatTensor(dup[i].size(0), dup[i].size(1)).uniform_(-0.01, 0.01).to(device=self.device) for i in range(dup.size(0))])
elif init_new_weight == "random":
return torch.FloatTensor(weight.shape[0]*num, weight.shape[1]).uniform_(-1., 1).to(device=self.device)
else:
raise NotImplementedError
def _initalize_weights_merge(self, weights_list, highest_ll_loc, init_new_weight="highest_ll", num=2):
if init_new_weight == "highest_ll":
# keep the weights of the more likely cluster
return weights_list[highest_ll_loc]
elif init_new_weight == "random_choice":
return weights_list[torch.round(torch.rand(1)).int().item()]
elif init_new_weight == "random":
return torch.FloatTensor(weights_list[0].shape[0], weights_list[0].shape[1]).uniform_(-1., 1).to(device=self.device)
else:
raise NotImplementedError
def _initalize_bias_split(self, bias_split, init_new_weight):
if init_new_weight == "same":
# just duplicate, can think of something more complex later
return bias_split.repeat(2)
elif init_new_weight == "same_w_noise":
# just duplicate, can think of something more complex later
return bias_split.repeat(2) + torch.FloatTensor(bias_split.repeat(2).size(0)).uniform_(-0.01, 0.01).to(device=self.device)
elif init_new_weight == "random":
return torch.zeros(bias_split.shape[0]*2).to(device=self.device)
else:
raise NotImplementedError
def _initalize_bias_merge(self, bias_list, highest_ll, init_new_weight="highest_ll", num=2):
if init_new_weight == "highest_ll":
# keep the weights of the more likely cluster
return bias_list[highest_ll]
elif init_new_weight == "random":
return bias_list[torch.round(torch.rand(1)).int().item()]
else:
raise NotImplementedError
class Conv_Classifier(nn.Module):
def __init__(self, hparams):
super(Conv_Classifier, self).__init__()
self.hparams = hparams
raise NotImplementedError("Need to implement split merge operations!")
# classifier
self.class_conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.class_conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.class_conv2_drop = nn.Dropout2d()
self.class_fc1 = nn.Linear(320, 50)
self.class_fc2 = nn.Linear(50, hparams.init_k)
def forward(self, x):
x = F.relu(F.max_pool2d(self.class_conv1(x), 2))
x = F.relu(F.max_pool2d(self.class_conv2_drop(self.class_conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.class_fc1(x))
x = F.dropout(x, training=self.training)
x = self.class_fc2(x)
return F.softmax(x, dim=1)
| [
"torch.nn.Linear",
"torch.cat",
"torch.isnan",
"torch.ones",
"torch.cuda.is_available",
"torch.mul",
"torch.FloatTensor",
"torch.logical_not",
"torch.tensor",
"torch.zeros",
"torch.nonzero",
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.rand",
"torch.no_grad",
"torch.nn.Dropout2d"
] | 1.11.0 | BGU-CS-VIL/DeepDPM | 46649f29513e3f69dcaea913b57c75b4b16a9d61 |
1.6 | import itertools
from typing import Tuple, List
import torch
from torch import nn
import torch.nn.functional as F
class RandomConv1d(nn.Module):
def __init__(
self,
channels: int,
filters: int,
sizes: Tuple[int, ...] = (7, 9, 11),
max_dilation_exponent: int = 7,
):
super().__init__()
self.convolutions = nn.ModuleList()
for k, d_exp, pad in itertools.product(
sizes, range(max_dilation_exponent + 1), (True, False)
):
padding = (k - 1) // 2 if pad else 0
self.convolutions.append(
nn.Conv1d(
channels,
filters * channels,
k,
padding=padding,
dilation=2 ** d_exp,
groups=channels,
)
)
self.random_weights()
def random_weights(self) -> None:
for conv in self.convolutions:
nn.init.normal_(conv.weight)
conv.weight.data -= conv.weight.mean(dim=-1, keepdim=True)
nn.init.uniform_(conv.bias, -1, 1)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
outputs = []
for conv in self.convolutions:
d = conv.weight.shape[-1] * conv.dilation[-1] - 1 - x.shape[-1]
if d > 0:
padding_left = d // 2
padding_right = d - padding_left
outputs.append(conv(F.pad(x, [padding_left, padding_right])))
else:
outputs.append(conv(x))
return outputs
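# Illustrative usage (channel/filter counts and the input length are assumptions): every
# random kernel configuration is applied to the same input, giving one feature map per
# (size, dilation, padding) combination, in the spirit of ROCKET-style random convolutions.
#
#   conv = RandomConv1d(channels=4, filters=2, sizes=(7, 9, 11), max_dilation_exponent=3)
#   x = torch.randn(8, 4, 500)       # (batch, channels, time)
#   maps = conv(x)                   # list of 3 sizes * 4 dilations * 2 paddings = 24 tensors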
| [
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.init.normal_",
"torch.nn.init.uniform_",
"torch.nn.functional.pad"
] | 1.6.0 | lucagrementieri/eegdrive | 65b122246e2a75c0c7c80db3e544f6a6741ceb53 |
1.6 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms, datasets
import pytorch_lightning as pl
from einops import rearrange, repeat
from vit_pytorch_lightning import ViT
def CIFAR10dataset(batch_size=256, num_workers=4):
transform = transforms.Compose([
transforms.ToTensor()
])
    # get the datasets
train_val = datasets.CIFAR10('./', train=True, download=True, transform=transform)
test = datasets.CIFAR10('./', train=False, download=True, transform=transform)
    # split into train and val
torch.manual_seed(0)
n_train, n_val = 40000, 10000
train, val = torch.utils.data.random_split(train_val, [n_train, n_val])
# Data Loader
train_loader = torch.utils.data.DataLoader(train, batch_size, shuffle=True, drop_last=True, num_workers=num_workers)
val_loader = torch.utils.data.DataLoader(val, batch_size, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test, batch_size, num_workers=num_workers)
return train_loader, val_loader, test_loader
def main(train_loader, val_loader, test_loader):
pl.seed_everything(0)
vit = ViT(dim=16, depth=12, heads=8, image_size=32, patch_size=4, num_classes=10, channels=3, mlp_dim=64)
trainer = pl.Trainer(max_epochs=1)
trainer.fit(vit, train_loader, val_loader)
results = trainer.test(test_dataloaders=test_loader)
print(results)
if __name__ == '__main__':
train, val, test = CIFAR10dataset()
main(train, val, test)
| [
"torch.manual_seed",
"torch.utils.data.random_split",
"torch.utils.data.DataLoader"
] | 1.6 | makoto-sofue/vit-pytorch-lightning | da8cace2ba06a2d1b277dec9a50ec9cd97b61230 |
1.2 | #!/usr/bin/env python3
"""Calculates ***Single Image*** Frechet Inception Distance (SIFID) to evalulate Single-Image-GANs
Code was adapted from:
https://github.com/mseitzer/pytorch-fid.git
Which was adapted from the TensorFlow implementation of:
https://github.com/bioinf-jku/TTUR
The FID metric calculates the distance between two distributions of images.
The SIFID calculates the distance between the distribution of deep features of a single real image and a single fake image.
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
#from scipy.misc import imread
from matplotlib.pyplot import imread
from torch.nn.functional import adaptive_avg_pool2d
try:
from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
def tqdm(x): return x
from inception import InceptionV3
import torchvision
import numpy
import scipy
import pickle
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--path2real', type=str, help=('Path to the real images'))
parser.add_argument('--path2fake', type=str, help=('Path to generated images'))
parser.add_argument('-c', '--gpu', default='', type=str, help='GPU to use (leave blank for CPU only)')
parser.add_argument('--images_suffix', default='jpg', type=str, help='image file suffix')
def get_activations(files, model, batch_size=1, dims=64,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if len(files) % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
n_batches = len(files) // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, dims))
for i in tqdm(range(n_batches)):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches),
end='', flush=True)
start = i * batch_size
end = start + batch_size
images = np.array([imread(str(f)).astype(np.float32)
for f in files[start:end]])
images = images[:,:,:,0:3]
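        # Keep only the first three (RGB) channels in case the images carry an alpha channel.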
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
#images = images[0,:,:,:]
images /= 255
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
#if pred.shape[2] != 1 or pred.shape[3] != 1:
# pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
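        # Unlike FID, SIFID keeps every spatial position of the (low-level) feature map as a separate
        # sample, so the statistics below are computed over batch_size * H * W feature vectors.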
pred_arr = pred.cpu().data.numpy().transpose(0, 2, 3, 1).reshape(batch_size*pred.shape[2]*pred.shape[3],-1)
if verbose:
print(' done')
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def calculate_activation_statistics(files, model, batch_size=1,
dims=64, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the inception model.
-- sigma : The covariance matrix of the activations of the inception model.
"""
act = get_activations(files, model, batch_size, dims, cuda, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg'))+ list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_sifid_given_paths(path1, path2, batch_size, cuda, dims, suffix):
"""Calculates the SIFID of two paths"""
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
path1 = pathlib.Path(path1)
files1 = list(path1.glob('*.%s' %suffix))
path2 = pathlib.Path(path2)
files2 = list(path2.glob('*.%s' %suffix))
print(path1)
print(files1)
fid_values = []
Im_ind = []
for i in range(len(files2)):
m1, s1 = calculate_activation_statistics([files1[i]], model, batch_size, dims, cuda)
m2, s2 = calculate_activation_statistics([files2[i]], model, batch_size, dims, cuda)
fid_values.append(calculate_frechet_distance(m1, s1, m2, s2))
file_num1 = files1[i].name
file_num2 = files2[i].name
Im_ind.append(int(file_num1[:-4]))
Im_ind.append(int(file_num2[:-4]))
return fid_values
if __name__ == '__main__':
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
path1 = args.path2real
path2 = args.path2fake
suffix = args.images_suffix
sifid_values = calculate_sifid_given_paths(path1,path2,1,args.gpu!='',64,suffix)
sifid_values = np.asarray(sifid_values,dtype=np.float32)
numpy.save('SIFID', sifid_values)
print('SIFID: ', sifid_values.mean())
| [
"torch.from_numpy"
] | 1.2 | git-pupil/SinGAN | e1eece165c426e332b69a6da10ec81494a3e1820 |
1.7 | # -*- coding: utf-8 -*-
"""
@date: 2021/2/2 下午5:46
@file: test_regvgg.py
@author: zj
@description:
"""
import torch
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.build import build_recognizer
from zcls.model.recognizers.vgg.repvgg import RepVGG
from zcls.model.backbones.vgg.repvgg_backbone import arch_settings
from zcls.model.conv_helper import insert_repvgg_block, insert_acblock, fuse_repvgg_block, fuse_acblock
def test_regvgg():
data = torch.randn(1, 3, 224, 224)
for key in arch_settings.keys():
print('*' * 10, key)
cfg.merge_from_file('configs/benchmarks/repvgg/repvgg_b2g4_cifar100_224_e100_sgd_calr.yaml')
model = RepVGG(cfg)
# print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (1, 100)
        print('insert_repvgg_block -> fuse_repvgg_block')
insert_repvgg_block(model)
# print(model)
model.eval()
outputs_insert = model(data)[KEY_OUTPUT]
fuse_repvgg_block(model)
# print(model)
model.eval()
outputs_fuse = model(data)[KEY_OUTPUT]
# print(outputs_insert)
# print(outputs_fuse)
print(torch.sqrt(torch.sum((outputs_insert - outputs_fuse) ** 2)))
print(torch.allclose(outputs_insert, outputs_fuse, atol=1e-8))
assert torch.allclose(outputs_insert, outputs_fuse, atol=1e-8)
        print('insert_repvgg_block -> insert_acblock -> fuse_acblock -> fuse_repvgg_block')
insert_repvgg_block(model)
insert_acblock(model)
# print(model)
model.eval()
outputs_insert = model(data)[KEY_OUTPUT]
fuse_acblock(model)
fuse_repvgg_block(model)
# print(model)
model.eval()
outputs_fuse = model(data)[KEY_OUTPUT]
print(torch.sqrt(torch.sum((outputs_insert - outputs_fuse) ** 2)))
print(torch.allclose(outputs_insert, outputs_fuse, atol=1e-6))
assert torch.allclose(outputs_insert, outputs_fuse, atol=1e-6)
        print('insert_acblock -> insert_repvgg_block -> fuse_repvgg_block -> fuse_acblock')
insert_repvgg_block(model)
insert_acblock(model)
# print(model)
model.eval()
outputs_insert = model(data)[KEY_OUTPUT]
fuse_acblock(model)
fuse_repvgg_block(model)
# print(model)
model.eval()
outputs_fuse = model(data)[KEY_OUTPUT]
print(torch.sqrt(torch.sum((outputs_insert - outputs_fuse) ** 2)))
print(torch.allclose(outputs_insert, outputs_fuse, atol=1e-6))
assert torch.allclose(outputs_insert, outputs_fuse, atol=1e-6)
def test_config_file():
data = torch.randn(3, 3, 224, 224)
print('repvgg_b2g4_custom_cifar100_224_e100_sgd')
config_file = "configs/benchmarks/repvgg/repvgg_b2g4_cifar100_224_e100_sgd_calr.yaml"
cfg.merge_from_file(config_file)
device = torch.device('cpu')
model = build_recognizer(cfg, device)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
fuse_repvgg_block(model)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
print('repvgg_b2g4_acb_custom_cifar100_224_e100_sgd')
config_file = "configs/benchmarks/repvgg/repvgg_b2g4_acb_cifar100_224_e100_sgd_calr.yaml"
cfg.merge_from_file(config_file)
device = torch.device('cpu')
model = build_recognizer(cfg, device)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
    # Note: if ACBlock is embedded inside RepVGG, fuse the ACBlock first and then the RepVGG block
fuse_acblock(model)
print(model)
fuse_repvgg_block(model)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
print('acb_repvgg_b2g4_custom_cifar100_224_e100_sgd')
config_file = "configs/benchmarks/repvgg/acb_repvgg_b2g4_cifar100_224_e100_sgd_calr.yaml"
cfg.merge_from_file(config_file)
device = torch.device('cpu')
model = build_recognizer(cfg, device)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
    # Note: if ACBlock is inserted first and RepVGGBlock second, fuse repvgg_block first and then acblock
fuse_repvgg_block(model)
print(model)
fuse_acblock(model)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
print('rxtd50_32x4d_acb_rvb_custom_cifar100_224_e100_sgd')
config_file = "configs/benchmarks/repvgg/rxtd50_32x4d_acb_rvb_cifar100_224_e100_sgd_calr.yaml"
cfg.merge_from_file(config_file)
device = torch.device('cpu')
model = build_recognizer(cfg, device)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
    # Note: if ACBlock is inserted first and RepVGGBlock second, fuse repvgg_block first and then acblock
fuse_repvgg_block(model)
print(model)
fuse_acblock(model)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
print('rxtd50_32x4d_rvb_acb_custom_cifar100_224_e100_sgd')
config_file = "configs/benchmarks/repvgg/rxtd50_32x4d_rvb_acb_cifar100_224_e100_sgd_calr.yaml"
cfg.merge_from_file(config_file)
device = torch.device('cpu')
model = build_recognizer(cfg, device)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
    # Note: if RepVGGBlock is inserted first and ACBlock second, fuse in the reverse order (acblock first, then repvgg_block)
fuse_acblock(model)
print(model)
fuse_repvgg_block(model)
print(model)
outputs = model(data)[KEY_OUTPUT]
assert outputs.shape == (3, 100)
if __name__ == '__main__':
test_regvgg()
test_config_file()
| [
"torch.device",
"torch.allclose",
"torch.randn",
"torch.sum"
] | 1.7.1 | likyoo/ZCls | 568621aca3a8b090c93345f0858d52c5757f2f0e |
2 | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import torch
from .size import get_intwarper_trt, IntWarper
from collections.abc import Iterable
def slice_to_trt(dim_size, dim_slice):
start = 0 if dim_slice.start is None else dim_slice.start
stop = dim_size if dim_slice.stop is None else dim_slice.stop
stride = 1 if dim_slice.step is None else dim_slice.step
size = (stop - start - 1) // stride + 1
return start, size, stride
def num_slice_types(slices):
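    # Count only the entries that consume an input dimension (slices, ints and gather indices);
    # `None` entries insert new dimensions instead.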
num_slice = 0
for s in slices:
if isinstance(s, slice) or isinstance(s, int) or isinstance(s, Iterable):
num_slice += 1
return num_slice
@tensorrt_converter('torch.Tensor.__getitem__')
def convert_tensor_getitem(ctx):
input = ctx.method_args[0]
slices = ctx.method_args[1]
output = ctx.method_return
# input_trt = input._trt
input_trt = trt_(ctx.network, input)
# Step 1 - Replace ellipsis with expanded slices
if not isinstance(slices, tuple):
slices = (slices,)
# num_ellipsis = input.ndim - num_slice_types(slices)
num_ellipsis = len(input.shape) - num_slice_types(slices)
new_slices = []
new_gather = []
erase_dims = []
add_dims = []
ellipsis_count = 0
for index, s in enumerate(slices):
if s is Ellipsis:
while num_ellipsis > 0:
new_slices.append(slice(None, None, None))
new_gather.append(None)
num_ellipsis -= 1
ellipsis_count += 1
ellipsis_count -= 1
elif isinstance(s, slice):
new_slices.append(s)
new_gather.append(None)
elif s is None:
add_dims.append(index + ellipsis_count)
# new_slices.append(None)
elif isinstance(s, int):
erase_dims.append(index + ellipsis_count)
new_slices.append(s)
new_gather.append(None)
elif isinstance(s, Iterable):
# gather
new_slices.append(slice(None, None, None))
new_gather.append(s)
# fill missing slices at end
while num_slice_types(new_slices) < len(input.shape):
new_slices.append(slice(None, None, None))
new_gather.append(None)
# Step 3 - Add slice layer (will currently ignore 'None' slices)
starts = []
sizes = []
strides = []
starts_shape_trt = []
sizes_shape_trt = []
strides_shape_trt = []
input_dim = 0
need_dynamic_input = False
one_trt = trt_(ctx.network, input.new_ones(1).int())
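    # Constant 1 tensor reused by the dynamic-shape arithmetic (the +/-1 terms of the slice size formula).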
for index, s in enumerate(new_slices):
dim_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt, index, 1, 1)
if input_dim >= len(input_trt.shape):
break
### slice shape and trt slice shape
input_size = int(input.shape[input_dim])
if isinstance(s, slice):
start, size, stride = slice_to_trt(input_size, s)
starts.append(start)
sizes.append(size)
strides.append(stride)
if start<0:
start_trt = ctx.network.add_elementwise(dim_shape_trt, trt_(ctx.network, start), trt.ElementWiseOperation.SUM).get_output(0)
need_dynamic_input = True
else:
start_trt = trt_(ctx.network, start)
starts_shape_trt.append(start_trt)
stride_trt = trt_(ctx.network, stride)
strides_shape_trt.append(stride_trt)
if size<0:
need_dynamic_input = True
size_trt = ctx.network.add_elementwise(dim_shape_trt, trt_(ctx.network, s.stop), trt.ElementWiseOperation.SUM).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, start_trt, trt.ElementWiseOperation.SUB).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, one_trt, trt.ElementWiseOperation.SUB).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, stride_trt, trt.ElementWiseOperation.DIV).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, one_trt, trt.ElementWiseOperation.SUM).get_output(0)
elif s.stop is None:
need_dynamic_input = True
size_trt = dim_shape_trt
size_trt = ctx.network.add_elementwise(size_trt, start_trt, trt.ElementWiseOperation.SUB).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, one_trt, trt.ElementWiseOperation.SUB).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, stride_trt, trt.ElementWiseOperation.DIV).get_output(0)
size_trt = ctx.network.add_elementwise(size_trt, one_trt, trt.ElementWiseOperation.SUM).get_output(0)
else:
size_trt = trt_(ctx.network, size)
sizes_shape_trt.append(size_trt)
input_dim += 1
elif isinstance(s, int):
starts.append(s)
sizes.append(1)
strides.append(1)
if s<0:
need_dynamic_input = True
start_trt = ctx.network.add_elementwise(dim_shape_trt, trt_(ctx.network, s), trt.ElementWiseOperation.SUM).get_output(0)
elif isinstance(s, IntWarper):
need_dynamic_input = True
start_trt = get_intwarper_trt(s, ctx)
else:
start_trt = get_intwarper_trt(s, ctx)
starts_shape_trt.append(start_trt)
sizes_shape_trt.append(trt_(ctx.network, 1))
strides_shape_trt.append(trt_(ctx.network, 1))
input_dim += 1
if not need_dynamic_input:
output_trt = ctx.network.add_slice(input_trt, starts, sizes, strides).get_output(0)
else:
starts_shape_trt = ctx.network.add_concatenation(starts_shape_trt).get_output(0)
sizes_shape_trt = ctx.network.add_concatenation(sizes_shape_trt).get_output(0)
strides_shape_trt = ctx.network.add_concatenation(strides_shape_trt).get_output(0)
slice_layer = ctx.network.add_slice(input_trt, starts, sizes, strides)
slice_layer.set_input(1, starts_shape_trt)
slice_layer.set_input(2, sizes_shape_trt)
slice_layer.set_input(3, strides_shape_trt)
output_trt = slice_layer.get_output(0)
# Step 3.5 - Add gather layer if necessary
for gidx, gather_value in enumerate(new_gather):
if gather_value is None:
continue
if isinstance(gather_value, torch.Tensor):
index_tensor = gather_value
if not hasattr(index_tensor, "_trt"):
index_tensor = index_tensor.int()
else:
index_tensor = input.new_tensor(gather_value).int()
index_tensor_trt = trt_(ctx.network, index_tensor)
output_trt = ctx.network.add_gather(output_trt, index_tensor_trt, gidx).get_output(0)
# Step 4 - Add shuffle layer to insert dimensions for 'None' slices and remove dimensions for 'int' slices
if len(erase_dims) + len(add_dims)>0:
layer = ctx.network.add_shuffle(output_trt)
## full output shape
out_shape_trt = [tensor_trt_get_shape_trt(ctx.network, output_trt, i, 1) for i in range(len(input.shape))]
## if slice is None
for add in add_dims[::-1]:
out_shape_trt = out_shape_trt[:add] + [one_trt] + out_shape_trt[add:]
## if slice is Int
for e in erase_dims:
out_shape_trt[e] = None
out_shape_trt = list(filter(lambda x: x is not None, out_shape_trt))
if len(out_shape_trt)>0:
out_shape_trt = ctx.network.add_concatenation(out_shape_trt).get_output(0)
else:
out_shape_trt = trt_(ctx.network, input.new_ones((1,)).int())
layer.set_input(1, out_shape_trt)
# layer.reshape_dims = tuple(output.shape) # exclude batch
output_trt = layer.get_output(0)
output._trt = output_trt
class LambdaModule(torch.nn.Module):
def __init__(self, fn):
super(LambdaModule, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_tensor_getitem_1d_int():
return LambdaModule(lambda x: x[:, 0])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_int():
return LambdaModule(lambda x: x[:, 0])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided():
return LambdaModule(lambda x: x[:, ::2])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided_offset():
return LambdaModule(lambda x: x[:, 1::2])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided_range():
return LambdaModule(lambda x: x[:, 1:3:2])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_insert_dim():
return LambdaModule(lambda x: x[:, None])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_insert_dim_ellipsis():
return LambdaModule(lambda x: x[:, None, ...])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_append_dim():
return LambdaModule(lambda x: x[:, ..., None])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_append_2dim():
return LambdaModule(lambda x: x[:, ..., None, None])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_weird_combo():
return LambdaModule(lambda x: x[:, 0:3:4, None, None, 1, ...]) | [
"torch.device"
] | 2 | huliang2016/torch2trt_dynamic | aa55f354a742d26272eae93934d0cff7cd946cbf |
1.4 | from typing import Any, Dict, Optional, Tuple, Type, Union, List
import gym
import torch as th
import torch.multiprocessing as mp
import random
from stable_baselines3.common import logger
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule, TensorDict
from stable_baselines3.bdpi.policies import BDPIPolicy
# Because BDPI uses many critics per agent, and each critic has 2 Q-Networks, sharing them with file descriptors
# exhausts the maximum number of open file descriptors on many Linux distributions. The file_system sharing method
# creates many small files in /dev/shm, that are then shared by file-name. This avoids reaching the maximum number
# of open file descriptors.
mp.set_sharing_strategy('file_system')
def train_model(
model: th.nn.Module,
inp: Union[th.Tensor, TensorDict],
outp: th.Tensor,
gradient_steps: int,
) -> float:
""" Train a PyTorch module on inputs and outputs, minimizing the MSE loss for gradient_steps steps.
:param model: PyTorch module to be trained. It must have a ".optimizer" attribute with an instance of Optimizer in it.
:param inp: Input tensor (or dictionary of tensors if model is a MultiInput model)
:param outp: Expected outputs tensor
:param gradient_steps: Number of gradient steps to execute when minimizing the MSE.
:return: MSE loss (with the 'sum' reduction) after the last gradient step, as a float.
"""
mse_loss = th.nn.MSELoss(reduction='sum')
for i in range(gradient_steps):
predicted = model(inp)
loss = mse_loss(predicted, outp)
model.optimizer.zero_grad()
loss.backward()
model.optimizer.step()
return float(loss.item())
class BDPI(OffPolicyAlgorithm):
"""
Bootstrapped Dual Policy Iteration
Sample-efficient discrete-action RL algorithm, built on one actor trained
to imitate the greedy policy of several Q-Learning critics.
Paper: https://arxiv.org/abs/1903.04193
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values and Actor)
it can be a function of the current progress remaining (from 1 to 0)
:param actor_lr: Conservative Policy Iteration learning rate for the actor (used in a formula, not for Adam gradient steps)
:param critic_lr: Q-Learning "alpha" learning rate for the critics
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
:param threads: Number of threads to use to train the actor and critics in parallel
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance.
"""
def __init__(
self,
policy: Union[str, Type[BDPIPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
actor_lr: float = 0.05,
critic_lr: float = 0.2,
buffer_size: int = 1000000, # 1e6
learning_starts: int = 256,
batch_size: int = 256,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 1,
gradient_steps: int = 20,
threads: int = 1,
replay_buffer_class: Optional[ReplayBuffer] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Dict[str, Any] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(BDPI, self).__init__(
policy,
env,
BDPIPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
0.0,
gamma,
train_freq,
gradient_steps,
None,
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
use_sde=False,
sde_sample_freq=1,
use_sde_at_warmup=False,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Discrete),
sde_support=False
)
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.threads = threads
self.pool = mp.get_context('spawn').Pool(threads)
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
""" Create the BDPI actor and critics, and make their memory shared across processes.
"""
super(BDPI, self)._setup_model()
self.actor = self.policy.actor
self.criticsA = self.policy.criticsA
self.criticsB = self.policy.criticsB
self.actor.share_memory()
for cA, cB in zip(self.criticsA, self.criticsB):
cA.share_memory()
cB.share_memory()
def _excluded_save_params(self) -> List[str]:
""" Process pools cannot be pickled, so exclude "self.pool" from the saved parameters of BDPI.
"""
return super()._excluded_save_params() + ['pool']
def train(self, gradient_steps: int, batch_size: int = 64) -> None:
""" BDPI Training procedure.
This method is called every time-step (if train_freq=1, as in the original paper).
Every time this method is called, the following steps are performed:
- Every critic, in random order, gets updated with the Clipped DQN equation on its own batch of experiences
- Every critic, just after being updated, computes its greedy policy and updates the actor towards it
- After every critic has been updated, their QA and QB networks are swapped.
This method implements some basic multi-processing:
- Every critic and the actor are PyTorch modules with share_memory() called on them
- A process pool is used to perform the neural network training operations (gradient descent steps)
This approach only has a minimal impact on code, but does not scale very well:
- On the plus side, the actor is trained concurrently by several workers, ala HOGWILD
- However, the predictions (getting Q(next state), computing updated Q-Values and the greedy policy)
all happen sequentially in the main process. With self.threads>8, the bottleneck therefore becomes
the main process, that has to perform all the updates and predictions. The worker processes only
fit neural networks.
"""
# Update optimizers learning rate
optimizers = [self.actor.optimizer] + [c.optimizer for c in self.criticsA] + [c.optimizer for c in self.criticsB]
self._update_learning_rate(optimizers)
# Update every critic (and the actor after each critic), in a random order
critic_losses = []
actor_losses = []
critics = list(zip(self.criticsA, self.criticsB))
random.shuffle(critics)
for criticA, criticB in critics:
# Sample replay buffer
with th.no_grad():
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# Update the critic (code taken from DQN)
with th.no_grad():
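                # Clipped DQN target: act greedily w.r.t. QA but evaluate with the element-wise
                # minimum of QA and QB to reduce overestimation.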
qvA = criticA(replay_data.next_observations)
qvB = criticB(replay_data.next_observations)
qv = th.min(qvA, qvB)
QN = th.arange(replay_data.rewards.shape[0])
next_q_values = qv[QN, qvA.argmax(1)].reshape(-1, 1)
# 1-step TD target
target_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Make real supervised learning target Q-Values (even for non-taken actions)
target_q_values = criticA(replay_data.observations)
actions = replay_data.actions.long().flatten()
target_q_values[QN, actions] += self.critic_lr * (target_values.flatten() - target_q_values[QN, actions])
critic_losses.append(
self.pool.apply_async(train_model, (criticA, replay_data.observations, target_q_values, gradient_steps))
)
logger.record("train/avg_q", float(target_q_values.mean()))
# Update the actor
with th.no_grad():
greedy_actions = target_q_values.argmax(1)
train_probas = th.zeros_like(target_q_values)
train_probas[QN, greedy_actions] = 1.0
# Normalize the direction to be pursued
train_probas /= 1e-6 + train_probas.sum(1)[:, None]
actor_probas = self.actor(replay_data.observations)
# Imitation learning (or distillation, or reward-penalty Pursuit, all these are the same thing)
alr = self.actor_lr
train_probas = (1. - alr) * actor_probas + alr * train_probas
train_probas /= train_probas.sum(-1, keepdim=True)
actor_losses.append(
self.pool.apply_async(train_model, (self.actor, replay_data.observations, train_probas, gradient_steps))
)
# Log losses
for aloss, closs in zip(actor_losses, critic_losses):
logger.record("train/critic_loss", closs.get())
logger.record("train/actor_loss", aloss.get())
# Swap QA and QB
self.criticsA, self.criticsB = self.criticsB, self.criticsA
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "BDPI",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(BDPI, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
| [
"torch.min",
"torch.nn.MSELoss",
"torch.arange",
"torch.no_grad",
"torch.multiprocessing.get_context",
"torch.zeros_like",
"torch.multiprocessing.set_sharing_strategy"
] | 1.4.0 | steckdenis/stable-baselines3 | 248a1174c7ebce67afaddb872fc7cb2c9a6d5720 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(
hidden_dim, hidden_dim, nheads, dropout=0.0
)
self.mask_head = MaskHeadSmallConv(
hidden_dim + nheads, [1024, 512, 256], hidden_dim
)
def forward(self, samples: NestedTensor):
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.detr.backbone(samples)
bs = features[-1].tensors.shape[0]
src, mask = features[-1].decompose()
assert mask is not None
src_proj = self.detr.input_proj(src)
hs, memory = self.detr.transformer(
src_proj, mask, self.detr.query_embed.weight, pos[-1]
)
outputs_class = self.detr.class_embed(hs)
outputs_coord = self.detr.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.detr.aux_loss:
out["aux_outputs"] = self.detr._set_aux_loss(outputs_class, outputs_coord)
# FIXME h_boxes takes the last one computed, keep this in mind
bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
seg_masks = self.mask_head(
src_proj,
bbox_mask,
[features[2].tensors, features[1].tensors, features[0].tensors],
)
outputs_seg_masks = seg_masks.view(
bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]
)
out["pred_masks"] = outputs_seg_masks
return out
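# Repeat a tensor `length` times along a new dim 1 and fold that dim into the batch dimension.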
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim,
context_dim // 2,
context_dim // 4,
context_dim // 8,
context_dim // 16,
context_dim // 64,
]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask: Optional[Tensor] = None):
q = self.q_linear(q)
k = F.conv2d(
k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias
)
qh = q.view(
q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads
)
kh = k.view(
k.shape[0],
self.num_heads,
self.hidden_dim // self.num_heads,
k.shape[-2],
k.shape[-1],
)
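        # Per-head dot product between every query embedding and every spatial location of the
        # feature map, producing one (H, W) attention map per query and head.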
weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
weights = self.dropout(weights)
return weights
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
def sigmoid_focal_loss(
inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2
):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples (defaults to 0.25 here).
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
class PostProcessSegm(nn.Module):
def __init__(self, threshold=0.5):
super().__init__()
self.threshold = threshold
@torch.no_grad()
def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
assert len(orig_target_sizes) == len(max_target_sizes)
max_h, max_w = max_target_sizes.max(0)[0].tolist()
outputs_masks = outputs["pred_masks"].squeeze(2)
outputs_masks = F.interpolate(
outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False
)
outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
for i, (cur_mask, t, tt) in enumerate(
zip(outputs_masks, max_target_sizes, orig_target_sizes)
):
img_h, img_w = t[0], t[1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
results[i]["masks"] = F.interpolate(
results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
).byte()
return results
class PostProcessPanoptic(nn.Module):
"""This class converts the output of the model to the final panoptic result, in the format expected by the
coco panoptic API"""
def __init__(self, is_thing_map, threshold=0.85):
"""
Parameters:
            is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
                the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
"""
super().__init__()
self.threshold = threshold
self.is_thing_map = is_thing_map
def forward(self, outputs, processed_sizes, target_sizes=None):
"""This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, ie the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
"""
if target_sizes is None:
target_sizes = processed_sizes
assert len(processed_sizes) == len(target_sizes)
out_logits, raw_masks, raw_boxes = (
outputs["pred_logits"],
outputs["pred_masks"],
outputs["pred_boxes"],
)
assert len(out_logits) == len(raw_masks) == len(target_sizes)
preds = []
def to_tuple(tup):
if isinstance(tup, tuple):
return tup
return tuple(tup.cpu().tolist())
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
):
# we filter empty queries and detection below threshold
scores, labels = cur_logits.softmax(-1).max(-1)
keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (
scores > self.threshold
)
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
cur_scores = cur_scores[keep]
cur_classes = cur_classes[keep]
cur_masks = cur_masks[keep]
cur_masks = interpolate(
cur_masks[None], to_tuple(size), mode="bilinear"
).squeeze(0)
cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
h, w = cur_masks.shape[-2:]
assert len(cur_boxes) == len(cur_classes)
# It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of masks ids for each stuff class (they are merged later on)
cur_masks = cur_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(cur_classes):
if not self.is_thing_map[label.item()]:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
final_h, final_w = to_tuple(target_size)
seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
seg_img = seg_img.resize(
size=(final_w, final_h), resample=Image.NEAREST
)
np_seg_img = (
torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))
.view(final_h, final_w, 3)
.numpy()
)
m_id = torch.from_numpy(rgb2id(np_seg_img))
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
if cur_classes.numel() > 0:
                # We now filter out empty masks as long as we find some
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(cur_classes)],
dtype=torch.bool,
device=keep.device,
)
if filtered_small.any().item():
cur_scores = cur_scores[~filtered_small]
cur_classes = cur_classes[~filtered_small]
cur_masks = cur_masks[~filtered_small]
area, seg_img = get_ids_area(cur_masks, cur_scores)
else:
break
else:
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
segments_info = []
for i, a in enumerate(area):
cat = cur_classes[i].item()
segments_info.append(
{
"id": i,
"isthing": self.is_thing_map[cat],
"category_id": cat,
"area": a,
}
)
del cur_classes
with io.BytesIO() as out:
seg_img.save(out, format="PNG")
predictions = {
"png_string": out.getvalue(),
"segments_info": segments_info,
}
preds.append(predictions)
return preds
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.kaiming_uniform_",
"torch.zeros",
"torch.einsum",
"torch.nn.init.constant_",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.nn.init.xavier_uniform_",
"torch.nn.GroupNorm",
"torch.ones",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.init.zeros_"
] | 1.5.0 | phamquiluan/table-transformer | 1fcbb539640e86659e825a7cfe410270f1686d8d |
1.0 |
import sys
import os
import time
import torch
from onmt.translate.decode_strategy import DecodeStrategy
import numpy as np
from IPython import embed
class BeamSearch(DecodeStrategy):
"""Generation beam search.
Note that the attributes list is not exhaustive. Rather, it highlights
tensors to document their shape. (Since the state variables' "batch"
size decreases as beams finish, we denote this axis with a B rather than
``batch_size``).
Args:
beam_size (int): Number of beams to use (see base ``parallel_paths``).
batch_size (int): See base.
pad (int): See base.
bos (int): See base.
eos (int): See base.
n_best (int): Don't stop until at least this many beams have
reached EOS.
mb_device (torch.device or str): See base ``device``.
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): See base.
max_length (int): See base.
return_attention (bool): See base.
block_ngram_repeat (int): See base.
exclusion_tokens (set[int]): See base.
memory_lengths (LongTensor): Lengths of encodings. Used for
masking attentions.
Attributes:
top_beam_finished (ByteTensor): Shape ``(B,)``.
_batch_offset (LongTensor): Shape ``(B,)``.
_beam_offset (LongTensor): Shape ``(batch_size x beam_size,)``.
alive_seq (LongTensor): See base.
topk_log_probs (FloatTensor): Shape ``(B x beam_size,)``. These
are the scores used for the topk operation.
select_indices (LongTensor or NoneType): Shape
``(B x beam_size,)``. This is just a flat view of the
``_batch_index``.
topk_scores (FloatTensor): Shape
``(B, beam_size)``. These are the
scores a sequence will receive if it finishes.
topk_ids (LongTensor): Shape ``(B, beam_size)``. These are the
word indices of the topk predictions.
_batch_index (LongTensor): Shape ``(B, beam_size)``.
_prev_penalty (FloatTensor or NoneType): Shape
``(B, beam_size)``. Initialized to ``None``.
_coverage (FloatTensor or NoneType): Shape
``(1, B x beam_size, inp_seq_len)``.
hypotheses (list[list[Tuple[Tensor]]]): Contains a tuple
of score (float), sequence (long), and attention (float or None).
"""
def __init__(self, beam_size, batch_size, pad, bos, eos, n_best, mb_device,
global_scorer, min_length, max_length, return_attention,
block_ngram_repeat, exclusion_tokens, memory_lengths,
stepwise_penalty, ratio):
super(BeamSearch, self).__init__(
pad, bos, eos, batch_size, mb_device, beam_size, min_length,
block_ngram_repeat, exclusion_tokens, return_attention,
max_length)
# beam parameters
self.global_scorer = global_scorer
self.beam_size = beam_size
self.n_best = n_best
self.batch_size = batch_size
self.ratio = ratio
# result caching
self.hypotheses = [[] for _ in range(batch_size)]
self.orig_positions = [[] for _ in range(batch_size)]
self.orig_positions_logsorted = [[] for _ in range(batch_size)]
# beam state
self.top_beam_finished = torch.zeros([batch_size], dtype=torch.uint8)
self.best_scores = torch.full([batch_size], -1e10, dtype=torch.float,
device=mb_device)
self._batch_offset = torch.arange(batch_size, dtype=torch.long)
self._beam_offset = torch.arange(
0, batch_size * beam_size, step=beam_size, dtype=torch.long,
device=mb_device)
self.topk_log_probs = torch.tensor(
[0.0] + [float("-inf")] * (beam_size - 1), device=mb_device
).repeat(batch_size)
self.select_indices = None
self._memory_lengths = memory_lengths
# buffers for the topk scores and 'backpointer'
self.topk_scores = torch.empty((batch_size, beam_size),
dtype=torch.float, device=mb_device)
self.topk_ids = torch.empty((batch_size, beam_size), dtype=torch.long,
device=mb_device)
self._batch_index = torch.empty([batch_size, beam_size],
dtype=torch.long, device=mb_device)
self.done = False
# "global state" of the old beam
self._prev_penalty = None
self._coverage = None
self._stepwise_cov_pen = (
stepwise_penalty and self.global_scorer.has_cov_pen)
self._vanilla_cov_pen = (
not stepwise_penalty and self.global_scorer.has_cov_pen)
self._cov_pen = self.global_scorer.has_cov_pen
@property
def current_predictions(self):
return self.alive_seq[:, -1]
@property
def current_origin(self):
return self.select_indices
@property
def current_backptr(self):
# for testing
return self.select_indices.view(self.batch_size, self.beam_size)\
.fmod(self.beam_size)
def advance(self, log_probs, attn, guide, guide_data, explorer, guide_batch_size=64):
vocab_size = log_probs.size(-1)
# using integer division to get an integer _B without casting
_B = log_probs.shape[0] // self.beam_size
if self._stepwise_cov_pen and self._prev_penalty is not None:
# DOES NOT RUN
assert False
self.topk_log_probs += self._prev_penalty
self.topk_log_probs -= self.global_scorer.cov_penalty(
self._coverage + attn, self.global_scorer.beta).view(
_B, self.beam_size)
# force the output to be longer than self.min_length
step = len(self)
self.ensure_min_length(log_probs)
# Multiply by guide probabilities
if guide:
for batch_idx in range(int(np.ceil(guide_data.shape[0] / guide_batch_size))):
idx_from = batch_idx*guide_batch_size
idx_to = (batch_idx+1)*guide_batch_size
guide.apply(inp=guide_data, log_probs=log_probs, idx_from=idx_from, idx_to=idx_to)
# Multiply probs by the beam probability.
log_probs += self.topk_log_probs.view(_B * self.beam_size, 1)
# Block beams where N-grams repeat
self.block_ngram_repeats(log_probs)
# if the sequence ends now, then the penalty is the current
# length + 1, to include the EOS token
        length_penalty = self.global_scorer.length_penalty(step + 1, alpha=self.global_scorer.alpha)  # returns 1.0 when no length penalty is configured
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
curr_scores = curr_scores.reshape(_B, self.beam_size * vocab_size) # torch.Size([batch_size, 370875])
# if step>3:
# embed()
# sys.exit(0)
        torch.topk(curr_scores, self.beam_size, dim=-1, out=(self.topk_scores, self.topk_ids))  # top-k over the flattened (beam x vocab) scores of each batch entry
# Recover log probs.
# Length penalty is just a scalar. It doesn't matter if it's applied
# before or after the topk.
torch.mul(self.topk_scores, length_penalty, out=self.topk_log_probs)
# Resolve beam origin and map to batch index flat representation.
torch.div(self.topk_ids, vocab_size, out=self._batch_index) # origin is basically the ID of the beam (log_probs) this came from
self._batch_index += self._beam_offset[:_B].unsqueeze(1)
self.select_indices = self._batch_index.view(_B * self.beam_size)
self.topk_ids.fmod_(vocab_size) # resolve true word ids
# Append last prediction.
self.alive_seq = torch.cat(
[self.alive_seq.index_select(0, self.select_indices),
self.topk_ids.view(_B * self.beam_size, 1)], -1)
if self.return_attention or self._cov_pen:
current_attn = attn.index_select(1, self.select_indices)
if step == 1:
self.alive_attn = current_attn
# update global state (step == 1)
if self._cov_pen: # coverage penalty
# DOES NOT RUN
assert False
self._prev_penalty = torch.zeros_like(self.topk_log_probs)
self._coverage = current_attn
else:
self.alive_attn = self.alive_attn.index_select(
1, self.select_indices)
self.alive_attn = torch.cat([self.alive_attn, current_attn], 0)
# update global state (step > 1)
if self._cov_pen:
# DOES NOT RUN
assert False
self._coverage = self._coverage.index_select(
1, self.select_indices)
self._coverage += current_attn
self._prev_penalty = self.global_scorer.cov_penalty(
self._coverage, beta=self.global_scorer.beta).view(
_B, self.beam_size)
# found = False
# if found or np.any(self.topk_ids.view(_B * self.beam_size, 1).cpu().numpy()==3):
# embed()
# found = True
if self._vanilla_cov_pen:
# DOES NOT RUN
assert False
# shape: (batch_size x beam_size, 1)
cov_penalty = self.global_scorer.cov_penalty(
self._coverage,
beta=self.global_scorer.beta)
self.topk_scores -= cov_penalty.view(_B, self.beam_size)
self.is_finished = self.topk_ids.eq(self.eos)
self.ensure_max_length()
def update_finished(self, stop=False):
# Penalize beams that finished.
_B_old = self.topk_log_probs.shape[0]
step = self.alive_seq.shape[-1] # 1 greater than the step in advance
self.topk_log_probs.masked_fill_(self.is_finished, -1e10)
# on real data (newstest2017) with the pretrained transformer,
# it's faster to not move this back to the original device
self.is_finished = self.is_finished.to('cpu')
self.top_beam_finished |= self.is_finished[:, 0].eq(1)
predictions = self.alive_seq.view(_B_old, self.beam_size, step)
attention = (
self.alive_attn.view(
step - 1, _B_old, self.beam_size, self.alive_attn.size(-1))
if self.alive_attn is not None else None)
non_finished_batch = []
if stop:
embed()
sys.exit(0)
for i in range(self.is_finished.size(0)):
b = self._batch_offset[i]
finished_hyp = self.is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
if self.ratio > 0:
s = self.topk_scores[i, j] / (step + 1)
if self.best_scores[b] < s:
self.best_scores[b] = s
self.orig_positions[b].append(j.item()) # save the original beam position
self.hypotheses[b].append((
self.topk_scores[i, j],
predictions[i, j, 1:], # Ignore start_token.
attention[:, i, j, :self._memory_lengths[i]]
if attention is not None else None))
# End condition is the top beam finished and we can return
# n_best hypotheses.
if self.ratio > 0:
pred_len = self._memory_lengths[i] * self.ratio
finish_flag = ((self.topk_scores[i, 0] / pred_len)
<= self.best_scores[b]) or \
self.is_finished[i].all()
else:
finish_flag = self.top_beam_finished[i] != 0
if finish_flag and len(self.hypotheses[b]) >= self.n_best:
best_hyp = sorted(
zip(self.orig_positions[b], self.hypotheses[b]), key=lambda x: x[1][0], reverse=True)
for n, (orig_pos, (score, pred, attn)) in enumerate(best_hyp):
if n >= self.n_best:
break
# embed()
# sys.exit(0)
self.scores[b].append(score)
self.predictions[b].append(pred) # HERE
self.orig_positions_logsorted[b].append(orig_pos)
self.attention[b].append(
attn if attn is not None else [])
else:
non_finished_batch.append(i)
non_finished = torch.tensor(non_finished_batch)
# If all sentences are translated, no need to go further.
# print("non_finished", len(non_finished))
if len(non_finished) == 0:
self.done = True
return
_B_new = non_finished.shape[0]
# Remove finished batches for the next step.
self.top_beam_finished = self.top_beam_finished.index_select(
0, non_finished)
self._batch_offset = self._batch_offset.index_select(0, non_finished)
non_finished = non_finished.to(self.topk_ids.device)
self.topk_log_probs = self.topk_log_probs.index_select(0,
non_finished)
self._batch_index = self._batch_index.index_select(0, non_finished) # non_finished refers to batch
self.select_indices = self._batch_index.view(_B_new * self.beam_size)
# print("alive_seq shape", self.alive_seq.shape)
self.alive_seq = predictions.index_select(0, non_finished).view(-1, self.alive_seq.size(-1))
# print("alive_seq shape", self.alive_seq.shape)
self.topk_scores = self.topk_scores.index_select(0, non_finished)
self.topk_ids = self.topk_ids.index_select(0, non_finished)
if self.alive_attn is not None:
inp_seq_len = self.alive_attn.size(-1)
self.alive_attn = attention.index_select(1, non_finished) \
.view(step - 1, _B_new * self.beam_size, inp_seq_len)
if self._cov_pen:
self._coverage = self._coverage \
.view(1, _B_old, self.beam_size, inp_seq_len) \
.index_select(1, non_finished) \
.view(1, _B_new * self.beam_size, inp_seq_len)
if self._stepwise_cov_pen:
self._prev_penalty = self._prev_penalty.index_select(
0, non_finished)
| [
"torch.zeros",
"torch.cat",
"torch.mul",
"torch.arange",
"torch.full",
"torch.tensor",
"torch.zeros_like",
"torch.div",
"torch.empty",
"torch.topk"
] | 1.0.1 | marekstrelec/OpenNMT-py | b20ebd3b42414cbfe5b1a4e4ccd1ef341d4fff71 |
1.6 | import copy
import numpy as np
import torch
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.utils.schedules import PiecewiseSchedule
from marltoolbox.envs.coin_game import \
CoinGame
from marltoolbox.envs.matrix_sequential_social_dilemma import \
IteratedPrisonersDilemma
from marltoolbox.utils import exploration
ROUNDING_ERROR = 1e-3
def assert_equal_wt_some_epsilon(v1, v2):
delta = torch.abs(v1 - v2)
assert torch.all(delta < ROUNDING_ERROR)
def test_clusterize_by_distance():
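    # values closer than the given threshold are merged into a cluster and replaced by the cluster mean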
output = exploration.clusterize_by_distance(
torch.Tensor([0.0, 0.4, 1.0, 1.4, 1.8, 3.0]), 0.5)
assert_equal_wt_some_epsilon(
output,
torch.Tensor([0.2000, 0.2000, 1.4000, 1.4000, 1.4000, 3.0000]))
output = exploration.clusterize_by_distance(
torch.Tensor([0.0, 0.5, 1.0, 1.4, 1.8, 3.0]), 0.5)
assert_equal_wt_some_epsilon(
output,
torch.Tensor([0.0000, 0.5000, 1.4000, 1.4000, 1.4000, 3.0000]))
output = exploration.clusterize_by_distance(
torch.Tensor([-10.0, -9.8, 1.0, 1.4, 1.8, 3.0]), 0.5)
assert_equal_wt_some_epsilon(
output,
torch.Tensor([-9.9000, -9.9000, 1.4000, 1.4000, 1.4000, 3.0000]))
output = exploration.clusterize_by_distance(
torch.Tensor([-1.0, -0.51, -0.1, 0.0, 0.1, 0.51, 1.0]), 0.5)
assert_equal_wt_some_epsilon(
output,
torch.Tensor([0., 0., 0., 0., 0., 0., 0.]))
class TestSoftQSchedule:
def set_class_to_test(self):
self.class_to_test = exploration.SoftQSchedule
def test__set_temperature_wt_explore(self):
self.set_class_to_test()
self.arrange_for_simple_ipd()
self.softqschedule._set_temperature(
explore=True, timestep=0)
assert self.softqschedule.temperature == self.initial_temperature
self.softqschedule._set_temperature(
explore=True, timestep=self.temperature_timesteps)
assert self.softqschedule.temperature == self.final_temperature
self.softqschedule._set_temperature(
explore=True, timestep=self.temperature_timesteps // 2)
assert abs(self.softqschedule.temperature -
(self.initial_temperature - self.final_temperature) / 2) < \
ROUNDING_ERROR
def test__set_temperature_wtout_explore(self):
self.set_class_to_test()
self.arrange_for_simple_ipd()
self.softqschedule._set_temperature(
explore=False, timestep=0)
assert self.softqschedule.temperature == 1.0
self.softqschedule._set_temperature(
explore=False, timestep=self.temperature_timesteps)
assert self.softqschedule.temperature == 1.0
self.softqschedule._set_temperature(
explore=False, timestep=self.temperature_timesteps // 2)
assert self.softqschedule.temperature == 1.0
def test__set_temperature_wt_explore_wt_multi_steps_schedule(self):
self.class_to_test = exploration.SoftQSchedule
self.arrange_for_multi_step_wt_coin_game()
self.softqschedule._set_temperature(
explore=True, timestep=0)
assert self.softqschedule.temperature == 2.0
self.softqschedule._set_temperature(
explore=True, timestep=2000)
assert self.softqschedule.temperature == 0.1
self.softqschedule._set_temperature(
explore=True, timestep=3000)
assert self.softqschedule.temperature == 0.1
self.softqschedule._set_temperature(
explore=True, timestep=500)
assert abs(self.softqschedule.temperature - 1.25) < ROUNDING_ERROR
self.softqschedule._set_temperature(
explore=True, timestep=1500)
assert abs(self.softqschedule.temperature - 0.3) < ROUNDING_ERROR
def arrange_for_simple_ipd(self):
self.initial_temperature = 1.0
self.final_temperature = 1e-6
self.temperature_timesteps = int(1e5)
self.temperature_schedule = None
self.init_ipd_scheduler()
def arrange_for_multi_step_wt_coin_game(self):
self.initial_temperature = 0.0
self.final_temperature = 0.0
self.temperature_timesteps = 0.0
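        # piecewise-linear temperature schedule: 2.0 at step 0, 0.5 at step 1000, 0.1 from step 2000 onwards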
self.temperature_schedule = PiecewiseSchedule(
endpoints=[
(0, 2.0),
(1000, 0.5),
(2000, 0.1)],
outside_value=0.1,
framework="torch")
self.init_coin_game_scheduler()
def init_ipd_scheduler(self):
self.softqschedule = self.init_scheduler(
IteratedPrisonersDilemma.ACTION_SPACE,
IteratedPrisonersDilemma.OBSERVATION_SPACE
)
def init_coin_game_scheduler(self):
self.softqschedule = self.init_scheduler(
CoinGame.ACTION_SPACE,
CoinGame({}).OBSERVATION_SPACE
)
def init_scheduler(self, action_space, obs_space):
return self.class_to_test(
action_space=action_space,
framework="torch",
initial_temperature=self.initial_temperature,
final_temperature=self.final_temperature,
temperature_timesteps=self.temperature_timesteps,
temperature_schedule=self.temperature_schedule,
policy_config={},
num_workers=0,
worker_index=0,
model=FullyConnectedNetwork(
obs_space=obs_space,
action_space=action_space,
num_outputs=action_space.n,
name="fc",
model_config=MODEL_DEFAULTS
)
)
def test__apply_temperature(self):
self.set_class_to_test()
self.arrange_for_multi_step_wt_coin_game()
for _ in range(10):
self.apply_and_assert_apply_temperature(
temperature=self.random_temperature(),
inputs=self.random_inputs()[0],
)
def apply_and_assert_apply_temperature(self, temperature, inputs):
action_distribution, action_dist_class = \
self.set_temperature_and_get_args(temperature=temperature,
inputs=inputs)
new_action_distribution = self.softqschedule._apply_temperature(
copy.deepcopy(action_distribution), action_dist_class)
assert all(
abs(n_v - v / self.softqschedule.temperature) < ROUNDING_ERROR
for v, n_v in zip(action_distribution.inputs,
new_action_distribution.inputs))
def set_temperature_and_get_args(self, temperature, inputs):
action_dist_class = TorchCategorical
action_distribution = TorchCategorical(
inputs, self.softqschedule.model, temperature=1.0)
self.softqschedule.temperature = temperature
return action_distribution, action_dist_class
def test_get_exploration_action_wtout_explore(self):
self.helper_test_get_exploration_action_wt_explore(explore=False)
def random_inputs(self):
return np.random.random(
size=(1, np.random.randint(1, 50, size=1)[0]))
def random_timestep(self):
return np.random.randint(0, 10000, size=1)[0]
def random_temperature(self):
return np.random.random(size=1)[0] * 10 + 1e-9
def apply_and_assert_get_exploration_action(
self, inputs, explore, timestep):
initial_action_distribution, _ = \
self.set_temperature_and_get_args(temperature=1.0,
inputs=inputs)
action_distribution = copy.deepcopy(initial_action_distribution)
_ = self.softqschedule.get_exploration_action(
action_distribution,
timestep=timestep,
explore=explore
)
temperature = self.softqschedule.temperature if explore else 1.0
errors = [abs(n_v - v / temperature)
for v, n_v in zip(initial_action_distribution.inputs[0],
action_distribution.inputs[0])]
assert all(err < ROUNDING_ERROR for err in errors), f"errors: {errors}"
def test_get_exploration_action_wt_explore(self):
self.helper_test_get_exploration_action_wt_explore(explore=True)
def helper_test_get_exploration_action_wt_explore(self, explore):
self.set_class_to_test()
self.arrange_for_multi_step_wt_coin_game()
for _ in range(10):
self.apply_and_assert_get_exploration_action(
inputs=self.random_inputs(),
explore=explore,
timestep=self.random_timestep())
class TestSoftQScheduleWtClustering(TestSoftQSchedule):
def set_class_to_test(self):
self.class_to_test = exploration.SoftQScheduleWtClustering
def helper_test_get_exploration_action_wt_explore(self, explore):
self.set_class_to_test()
self.arrange_for_multi_step_wt_coin_game()
for inputs in self.get_inputs_list():
self.apply_and_assert_get_exploration_action(
inputs=inputs,
explore=explore,
timestep=self.random_timestep())
def get_inputs_list(self):
return [
[[1.0, 0.0]],
[[5.0, -1.0]],
[[1.0, 1.6]],
[[101, -2.3]],
[[65, 98, 13, 56, 123, 156, 84]],
]
| [
"torch.abs",
"torch.all",
"torch.Tensor"
] | 1.6.0 | longtermrisk/marltoolbox | cae1ba94ccb44700b66a32e0734a0f11c9c6c7fe |
1.6 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.exceptions.errors import UnsupportedError
from botorch.utils.containers import TrainingData
from botorch.utils.testing import BotorchTestCase
class TestContainers(BotorchTestCase):
def test_TrainingData(self):
# block design, without variance observations
X_bd = torch.rand(2, 4, 3)
Y_bd = torch.rand(2, 4, 2)
training_data_bd = TrainingData.from_block_design(X_bd, Y_bd)
self.assertTrue(training_data_bd.is_block_design)
self.assertTrue(torch.equal(training_data_bd.X, X_bd))
self.assertTrue(torch.equal(training_data_bd.Y, Y_bd))
self.assertIsNone(training_data_bd.Yvar)
        self.assertTrue(all(torch.equal(Xi, X_bd) for Xi in training_data_bd.Xs))
self.assertTrue(torch.equal(training_data_bd.Ys[0], Y_bd[..., :1]))
self.assertTrue(torch.equal(training_data_bd.Ys[1], Y_bd[..., 1:]))
self.assertIsNone(training_data_bd.Yvars)
        # test equality check with null Yvars and one-element Xs and Ys
self.assertEqual(
training_data_bd,
TrainingData(Xs=[X_bd] * 2, Ys=list(torch.split(Y_bd, 1, dim=-1))),
)
# block design, with variance observations
Yvar_bd = torch.rand(2, 4, 2)
training_data_bd = TrainingData.from_block_design(X_bd, Y_bd, Yvar_bd)
self.assertTrue(training_data_bd.is_block_design)
self.assertTrue(torch.equal(training_data_bd.X, X_bd))
self.assertTrue(torch.equal(training_data_bd.Y, Y_bd))
self.assertTrue(torch.equal(training_data_bd.Yvar, Yvar_bd))
        self.assertTrue(all(torch.equal(Xi, X_bd) for Xi in training_data_bd.Xs))
self.assertTrue(torch.equal(training_data_bd.Ys[0], Y_bd[..., :1]))
self.assertTrue(torch.equal(training_data_bd.Ys[1], Y_bd[..., 1:]))
self.assertTrue(torch.equal(training_data_bd.Yvars[0], Yvar_bd[..., :1]))
self.assertTrue(torch.equal(training_data_bd.Yvars[1], Yvar_bd[..., 1:]))
        # test equality check with non-null Yvars and one-element Xs and Ys
self.assertEqual(
training_data_bd,
TrainingData(
Xs=[X_bd] * 2,
Ys=list(torch.split(Y_bd, 1, dim=-1)),
Yvars=list(torch.split(Yvar_bd, 1, dim=-1)),
),
)
# non-block design, without variance observations
Xs = [torch.rand(2, 4, 3), torch.rand(2, 3, 3)]
Ys = [torch.rand(2, 4, 2), torch.rand(2, 3, 2)]
training_data_nbd = TrainingData(Xs, Ys)
self.assertFalse(training_data_nbd.is_block_design)
self.assertTrue(torch.equal(training_data_nbd.Xs[0], Xs[0]))
self.assertTrue(torch.equal(training_data_nbd.Xs[1], Xs[1]))
self.assertTrue(torch.equal(training_data_nbd.Ys[0], Ys[0]))
self.assertTrue(torch.equal(training_data_nbd.Ys[1], Ys[1]))
self.assertIsNone(training_data_nbd.Yvars)
with self.assertRaises(UnsupportedError):
training_data_nbd.X
with self.assertRaises(UnsupportedError):
training_data_nbd.Y
self.assertIsNone(training_data_nbd.Yvar)
# test equality check with different length Xs and Ys in two training data
# and only one training data including non-null Yvars
self.assertNotEqual(training_data_nbd, training_data_bd)
        # test equality of two training data objects with different length Xs/Ys
training_data_nbd_X = TrainingData(
Xs=Xs + [torch.rand(2, 2, 3)],
Ys=Ys,
)
self.assertNotEqual(training_data_nbd, training_data_nbd_X)
training_data_nbd_Y = TrainingData(
Xs=Xs,
Ys=Ys + [torch.rand(2, 2, 2)],
)
self.assertNotEqual(training_data_nbd, training_data_nbd_Y)
# non-block design, with variance observations
Yvars = [torch.rand(2, 4, 2), torch.rand(2, 3, 2)]
training_data_nbd_yvar = TrainingData(Xs, Ys, Yvars)
self.assertFalse(training_data_nbd_yvar.is_block_design)
self.assertTrue(torch.equal(training_data_nbd_yvar.Xs[0], Xs[0]))
self.assertTrue(torch.equal(training_data_nbd_yvar.Xs[1], Xs[1]))
self.assertTrue(torch.equal(training_data_nbd_yvar.Ys[0], Ys[0]))
self.assertTrue(torch.equal(training_data_nbd_yvar.Ys[1], Ys[1]))
self.assertTrue(torch.equal(training_data_nbd_yvar.Yvars[0], Yvars[0]))
self.assertTrue(torch.equal(training_data_nbd_yvar.Yvars[1], Yvars[1]))
with self.assertRaises(UnsupportedError):
training_data_nbd_yvar.X
with self.assertRaises(UnsupportedError):
training_data_nbd_yvar.Y
with self.assertRaises(UnsupportedError):
training_data_nbd_yvar.Yvar
# test equality check with same length Xs and Ys in two training data but
# with variance observations only in one
self.assertNotEqual(training_data_nbd, training_data_nbd_yvar)
# test equality check with different length Xs and Ys in two training data
self.assertNotEqual(training_data_nbd_yvar, training_data_bd)
# implicit block design, without variance observations
X = torch.rand(2, 4, 3)
Xs = [X] * 2
Ys = [torch.rand(2, 4, 2), torch.rand(2, 4, 2)]
training_data = TrainingData(Xs, Ys)
self.assertTrue(training_data.is_block_design)
self.assertTrue(torch.equal(training_data.X, X))
self.assertTrue(torch.equal(training_data.Y, torch.cat(Ys, dim=-1)))
self.assertIsNone(training_data.Yvar)
self.assertTrue(torch.equal(training_data.Xs[0], X))
self.assertTrue(torch.equal(training_data.Xs[1], X))
self.assertTrue(torch.equal(training_data.Ys[0], Ys[0]))
self.assertTrue(torch.equal(training_data.Ys[1], Ys[1]))
self.assertIsNone(training_data.Yvars)
# implicit block design, with variance observations
Yvars = [torch.rand(2, 4, 2), torch.rand(2, 4, 2)]
training_data = TrainingData(Xs, Ys, Yvars)
self.assertTrue(training_data.is_block_design)
self.assertTrue(torch.equal(training_data.X, X))
self.assertTrue(torch.equal(training_data.Y, torch.cat(Ys, dim=-1)))
self.assertTrue(torch.equal(training_data.Yvar, torch.cat(Yvars, dim=-1)))
self.assertTrue(torch.equal(training_data.Xs[0], X))
self.assertTrue(torch.equal(training_data.Xs[1], X))
self.assertTrue(torch.equal(training_data.Ys[0], Ys[0]))
self.assertTrue(torch.equal(training_data.Ys[1], Ys[1]))
self.assertTrue(torch.equal(training_data.Yvars[0], Yvars[0]))
self.assertTrue(torch.equal(training_data.Yvars[1], Yvars[1]))
# test equality with same Xs and Ys but different-length Yvars
self.assertNotEqual(
TrainingData(Xs, Ys, Yvars),
TrainingData(Xs, Ys, Yvars[:1]),
)
| [
"torch.rand",
"torch.cat",
"torch.equal",
"torch.split"
] | 1.6 | ngam/botorch | c6bc8f9a82c4959cc209335fefd8b906023edd1e |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import unittest
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from omegaconf import OmegaConf
class SimpleModule(BaseModel):
def __init__(self, config={}):
super().__init__(config)
self.base = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, x, target):
x = self.classifier(self.base(x))
return {"losses": {"total_loss": self.loss(x, target)}}
class NumbersDataset(torch.utils.data.Dataset):
def __init__(self):
self.samples = list(range(1, 1001))
def __getitem__(self, idx):
return self.samples[idx]
def __len__(self):
return len(self.samples)
class TestLogisticsCallback(unittest.TestCase):
def setUp(self):
self.trainer = argparse.Namespace()
self.config = OmegaConf.create(
{
"model": "simple",
"model_config": {},
"training": {
"lr_scheduler": True,
"lr_ratio": 0.1,
"lr_steps": [1, 2],
"use_warmup": False,
},
}
)
# Keep original copy for testing purposes
self.trainer.config = deepcopy(self.config)
registry.register("config", self.trainer.config)
self.trainer.model = SimpleModule()
self.trainer.val_loader = torch.utils.data.DataLoader(
NumbersDataset(), batch_size=self.config.training.batch_size
)
self.trainer.optimizer = torch.optim.Adam(
self.trainer.model.parameters(), lr=1e-01
)
self.trainer.lr_scheduler_callback = LRSchedulerCallback(
self.config, self.trainer
)
def tearDown(self):
registry.unregister("config")
def test_on_update_end(self):
self.trainer.lr_scheduler_callback.on_update_end()
self.assertAlmostEqual(self.trainer.optimizer.param_groups[0]["lr"], 1e-02)
self.trainer.lr_scheduler_callback.on_update_end()
self.assertAlmostEqual(self.trainer.optimizer.param_groups[0]["lr"], 1e-03)
| [
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | simran2905/mmf | c8f47a23b85a87d14616c2f53e81693a25ea929a |
1.4 | import argparse
import datetime
import os
import pprint
import numpy as np
import torch
from atari_network import DQN
from atari_wrapper import make_atari_env
from torch.utils.tensorboard import SummaryWriter
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.policy import DQNPolicy
from tianshou.policy.modelbased.icm import ICMPolicy
from tianshou.trainer import offpolicy_trainer
from tianshou.utils import TensorboardLogger, WandbLogger
from tianshou.utils.net.discrete import IntrinsicCuriosityModule
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="PongNoFrameskip-v4")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--scale-obs", type=int, default=0)
parser.add_argument("--eps-test", type=float, default=0.005)
parser.add_argument("--eps-train", type=float, default=1.)
parser.add_argument("--eps-train-final", type=float, default=0.05)
parser.add_argument("--buffer-size", type=int, default=100000)
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--n-step", type=int, default=3)
parser.add_argument("--target-update-freq", type=int, default=500)
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--step-per-epoch", type=int, default=100000)
parser.add_argument("--step-per-collect", type=int, default=10)
parser.add_argument("--update-per-step", type=float, default=0.1)
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--training-num", type=int, default=10)
parser.add_argument("--test-num", type=int, default=10)
parser.add_argument("--logdir", type=str, default="log")
parser.add_argument("--render", type=float, default=0.)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu"
)
parser.add_argument("--frames-stack", type=int, default=4)
parser.add_argument("--resume-path", type=str, default=None)
parser.add_argument("--resume-id", type=str, default=None)
parser.add_argument(
"--logger",
type=str,
default="tensorboard",
choices=["tensorboard", "wandb"],
)
parser.add_argument("--wandb-project", type=str, default="atari.benchmark")
parser.add_argument(
"--watch",
default=False,
action="store_true",
help="watch the play of pre-trained policy only"
)
parser.add_argument("--save-buffer-name", type=str, default=None)
parser.add_argument(
"--icm-lr-scale",
type=float,
default=0.,
help="use intrinsic curiosity module with this lr scale"
)
parser.add_argument(
"--icm-reward-scale",
type=float,
default=0.01,
help="scaling factor for intrinsic curiosity reward"
)
parser.add_argument(
"--icm-forward-loss-weight",
type=float,
default=0.2,
help="weight for the forward model loss in ICM"
)
return parser.parse_args()
def test_dqn(args=get_args()):
env, train_envs, test_envs = make_atari_env(
args.task,
args.seed,
args.training_num,
args.test_num,
scale=args.scale_obs,
frame_stack=args.frames_stack,
)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
# should be N_FRAMES x H x W
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# define model
net = DQN(*args.state_shape, args.action_shape, args.device).to(args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
# define policy
policy = DQNPolicy(
net,
optim,
args.gamma,
args.n_step,
target_update_freq=args.target_update_freq
)
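    # optionally wrap the base DQN policy with an Intrinsic Curiosity Module for exploration bonuses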
if args.icm_lr_scale > 0:
feature_net = DQN(
*args.state_shape, args.action_shape, args.device, features_only=True
)
action_dim = np.prod(args.action_shape)
feature_dim = feature_net.output_dim
icm_net = IntrinsicCuriosityModule(
feature_net.net,
feature_dim,
action_dim,
hidden_sizes=[512],
device=args.device
)
icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr)
policy = ICMPolicy(
policy, icm_net, icm_optim, args.icm_lr_scale, args.icm_reward_scale,
args.icm_forward_loss_weight
).to(args.device)
# load a previous policy
if args.resume_path:
policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))
print("Loaded agent from: ", args.resume_path)
# replay buffer: `save_last_obs` and `stack_num` can be removed together
# when you have enough RAM
buffer = VectorReplayBuffer(
args.buffer_size,
buffer_num=len(train_envs),
ignore_obs_next=True,
save_only_last_obs=True,
stack_num=args.frames_stack
)
# collector
train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
test_collector = Collector(policy, test_envs, exploration_noise=True)
# log
now = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
args.algo_name = "dqn_icm" if args.icm_lr_scale > 0 else "dqn"
log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)
log_path = os.path.join(args.logdir, log_name)
# logger
if args.logger == "wandb":
logger = WandbLogger(
save_interval=1,
name=log_name.replace(os.path.sep, "__"),
run_id=args.resume_id,
config=args,
project=args.wandb_project,
)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
if args.logger == "tensorboard":
logger = TensorboardLogger(writer)
else: # wandb
logger.load(writer)
def save_best_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))
def stop_fn(mean_rewards):
if env.spec.reward_threshold:
return mean_rewards >= env.spec.reward_threshold
elif "Pong" in args.task:
return mean_rewards >= 20
else:
return False
def train_fn(epoch, env_step):
# nature DQN setting, linear decay in the first 1M steps
if env_step <= 1e6:
eps = args.eps_train - env_step / 1e6 * \
(args.eps_train - args.eps_train_final)
else:
eps = args.eps_train_final
policy.set_eps(eps)
if env_step % 1000 == 0:
logger.write("train/env_step", env_step, {"train/eps": eps})
def test_fn(epoch, env_step):
policy.set_eps(args.eps_test)
def save_checkpoint_fn(epoch, env_step, gradient_step):
# see also: https://pytorch.org/tutorials/beginner/saving_loading_models.html
ckpt_path = os.path.join(log_path, "checkpoint.pth")
torch.save({"model": policy.state_dict()}, ckpt_path)
return ckpt_path
# watch agent's performance
def watch():
print("Setup test envs ...")
policy.eval()
policy.set_eps(args.eps_test)
test_envs.seed(args.seed)
if args.save_buffer_name:
print(f"Generate buffer with size {args.buffer_size}")
buffer = VectorReplayBuffer(
args.buffer_size,
buffer_num=len(test_envs),
ignore_obs_next=True,
save_only_last_obs=True,
stack_num=args.frames_stack
)
collector = Collector(policy, test_envs, buffer, exploration_noise=True)
result = collector.collect(n_step=args.buffer_size)
print(f"Save buffer into {args.save_buffer_name}")
# Unfortunately, pickle will cause oom with 1M buffer size
buffer.save_hdf5(args.save_buffer_name)
else:
print("Testing agent ...")
test_collector.reset()
result = test_collector.collect(
n_episode=args.test_num, render=args.render
)
rew = result["rews"].mean()
print(f"Mean reward (over {result['n/ep']} episodes): {rew}")
if args.watch:
watch()
exit(0)
# test train_collector and start filling replay buffer
train_collector.collect(n_step=args.batch_size * args.training_num)
# trainer
result = offpolicy_trainer(
policy,
train_collector,
test_collector,
args.epoch,
args.step_per_epoch,
args.step_per_collect,
args.test_num,
args.batch_size,
train_fn=train_fn,
test_fn=test_fn,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
update_per_step=args.update_per_step,
test_in_train=False,
resume_from_log=args.resume_id is not None,
save_checkpoint_fn=save_checkpoint_fn,
)
pprint.pprint(result)
watch()
if __name__ == "__main__":
test_dqn(get_args())
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.utils.tensorboard.SummaryWriter"
] | 1.4.0 | quangr/tianshou | 110114e134bc0b7cf17973882e6383842e48dab3 |
1.2 | import torch
import torch.nn.functional as F
from collections import OrderedDict
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.transform import resize_boxes
class FRCNN_FPN(FasterRCNN):
def __init__(self, num_classes):
backbone = resnet_fpn_backbone('resnet50', False)
super(FRCNN_FPN, self).__init__(backbone, num_classes)
# self.im_info = {
# 'channel': 'RGB',
# 'scale': (255, 255, 255),
# 'mean': (0.485, 0.456, 0.406),
# 'var': (0.229, 0.224, 0.225),
# }
self.im_info = {
'channel': 'RGB',
'scale': (255, 255, 255),
'mean': (0., 0., 0.),
'var': (1, 1, 1),
}
def detect(self, img):
device = list(self.parameters())[0].device
img = img.to(device)
detections = self(img)[0]
return detections['boxes'].detach(), detections['scores'].detach()
def predict_boxes(self, images, boxes):
device = list(self.parameters())[0].device
images = images.to(device)
boxes = boxes.to(device)
targets = None
original_image_sizes = [img.shape[-2:] for img in images]
images, targets = self.transform(images, targets)
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([(0, features)])
# proposals, proposal_losses = self.rpn(images, features, targets)
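        # project the provided boxes from the original image size onto the internally resized image before RoI pooling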
boxes = resize_boxes(
boxes, original_image_sizes[0], images.image_sizes[0])
proposals = [boxes]
box_features = self.roi_heads.box_roi_pool(
features, proposals, images.image_sizes)
box_features = self.roi_heads.box_head(box_features)
class_logits, box_regression = self.roi_heads.box_predictor(
box_features)
pred_boxes = self.roi_heads.box_coder.decode(box_regression, proposals)
pred_scores = F.softmax(class_logits, -1)
pred_boxes = pred_boxes[:, 1:].squeeze(dim=1).detach()
pred_boxes = resize_boxes(
pred_boxes, images.image_sizes[0], original_image_sizes[0])
pred_scores = pred_scores[:, 1:].squeeze(dim=1).detach()
return pred_boxes, pred_scores
def load_image(self, img):
pass
| [
"torch.nn.functional.softmax"
] | 1.2.0 | liuqk3/GSM | 188965b3a11f9cdbe166d79cac7cd2e9fb4c1785 |
1.9 | #!/usr/bin/env python3
import unittest
from unittest import mock
import torch
from Lgpytorch import settings
from Lgpytorch.lazy import (
ConstantDiagLazyTensor,
DiagLazyTensor,
KroneckerProductAddedDiagLazyTensor,
KroneckerProductDiagLazyTensor,
KroneckerProductLazyTensor,
NonLazyTensor,
)
from Lgpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestKroneckerProductAddedDiagLazyTensor(unittest.TestCase, LazyTensorTestCase):
# this lazy tensor has an explicit inverse so we don't need to run these
skip_slq_tests = True
tolerances = {
**LazyTensorTestCase.tolerances,
# symeig (used in Kronecker algebra) yields less precise solves
"grad": {"rtol": 0.03, "atol": 1e-4},
"inv_matmul": {"rtol": 0.02, "atol": 1e-4},
}
def create_lazy_tensor(self):
a = torch.tensor([[4, 0, 2], [0, 3, -1], [2, -1, 3]], dtype=torch.float)
b = torch.tensor([[2, 1], [1, 2]], dtype=torch.float)
c = torch.tensor([[4, 0.5, 1, 0], [0.5, 4, -1, 0], [1, -1, 3, 0], [0, 0, 0, 4]], dtype=torch.float)
d = 0.5 * torch.rand(24, dtype=torch.float)
a.requires_grad_(True)
b.requires_grad_(True)
c.requires_grad_(True)
d.requires_grad_(True)
kp_lazy_tensor = KroneckerProductLazyTensor(NonLazyTensor(a), NonLazyTensor(b), NonLazyTensor(c))
diag_lazy_tensor = DiagLazyTensor(d)
return KroneckerProductAddedDiagLazyTensor(kp_lazy_tensor, diag_lazy_tensor)
def evaluate_lazy_tensor(self, lazy_tensor):
tensor = lazy_tensor._lazy_tensor.evaluate()
diag = lazy_tensor._diag_tensor._diag
return tensor + diag.diag()
class TestKroneckerProductAddedKroneckerDiagLazyTensor(TestKroneckerProductAddedDiagLazyTensor):
# this lazy tensor has an explicit inverse so we don't need to run these
skip_slq_tests = True
should_call_cg = False
should_call_lanczos = False
def create_lazy_tensor(self):
a = torch.tensor([[4, 0, 2], [0, 3, -1], [2, -1, 3]], dtype=torch.float)
b = torch.tensor([[2, 1], [1, 2]], dtype=torch.float)
c = torch.tensor([[4, 0.5, 1, 0], [0.5, 4, -1, 0], [1, -1, 3, 0], [0, 0, 0, 4]], dtype=torch.float)
d = torch.tensor([2, 1, 3], dtype=torch.float)
e = torch.tensor([5], dtype=torch.float)
f = torch.tensor([2.5], dtype=torch.float)
a.requires_grad_(True)
b.requires_grad_(True)
c.requires_grad_(True)
d.requires_grad_(True)
e.requires_grad_(True)
f.requires_grad_(True)
kp_lazy_tensor = KroneckerProductLazyTensor(NonLazyTensor(a), NonLazyTensor(b), NonLazyTensor(c))
diag_lazy_tensor = KroneckerProductDiagLazyTensor(
DiagLazyTensor(d), ConstantDiagLazyTensor(e, diag_shape=2), ConstantDiagLazyTensor(f, diag_shape=4)
)
return KroneckerProductAddedDiagLazyTensor(kp_lazy_tensor, diag_lazy_tensor)
class TestKroneckerProductAddedKroneckerConstDiagLazyTensor(TestKroneckerProductAddedKroneckerDiagLazyTensor):
should_call_lanczos = True
def create_lazy_tensor(self):
a = torch.tensor([[4, 0, 2], [0, 3, -1], [2, -1, 3]], dtype=torch.float)
b = torch.tensor([[2, 1], [1, 2]], dtype=torch.float)
c = torch.tensor([[4, 0.5, 1, 0], [0.5, 4, -1, 0], [1, -1, 3, 0], [0, 0, 0, 4]], dtype=torch.float)
d = torch.tensor([2], dtype=torch.float)
e = torch.tensor([5], dtype=torch.float)
f = torch.tensor([2.5], dtype=torch.float)
a.requires_grad_(True)
b.requires_grad_(True)
c.requires_grad_(True)
d.requires_grad_(True)
e.requires_grad_(True)
f.requires_grad_(True)
kp_lazy_tensor = KroneckerProductLazyTensor(NonLazyTensor(a), NonLazyTensor(b), NonLazyTensor(c))
diag_lazy_tensor = KroneckerProductDiagLazyTensor(
ConstantDiagLazyTensor(d, diag_shape=3),
ConstantDiagLazyTensor(e, diag_shape=2),
ConstantDiagLazyTensor(f, diag_shape=4),
)
return KroneckerProductAddedDiagLazyTensor(kp_lazy_tensor, diag_lazy_tensor)
class TestKroneckerProductAddedConstDiagLazyTensor(TestKroneckerProductAddedDiagLazyTensor):
should_call_cg = False
should_call_lanczos = False
def create_lazy_tensor(self):
a = torch.tensor([[4, 0, 2], [0, 3, -1], [2, -1, 3]], dtype=torch.float)
b = torch.tensor([[2, 1], [1, 2]], dtype=torch.float)
c = torch.tensor([[4, 0.5, 1, 0], [0.5, 4, -1, 0], [1, -1, 3, 0], [0, 0, 0, 4]], dtype=torch.float)
a.requires_grad_(True)
b.requires_grad_(True)
c.requires_grad_(True)
kp_lazy_tensor = KroneckerProductLazyTensor(NonLazyTensor(a), NonLazyTensor(b), NonLazyTensor(c))
diag_lazy_tensor = ConstantDiagLazyTensor(
torch.tensor([0.25], dtype=torch.float, requires_grad=True),
kp_lazy_tensor.shape[-1],
)
return KroneckerProductAddedDiagLazyTensor(kp_lazy_tensor, diag_lazy_tensor)
def test_if_cholesky_used(self):
lazy_tensor = self.create_lazy_tensor()
rhs = torch.randn(lazy_tensor.size(-1))
# Check that cholesky is not called
with mock.patch.object(lazy_tensor, "cholesky") as chol_mock:
self._test_inv_matmul(rhs, cholesky=False)
chol_mock.assert_not_called()
def test_root_inv_decomposition_no_cholesky(self):
with settings.max_cholesky_size(0):
lazy_tensor = self.create_lazy_tensor()
test_mat = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 5)
# Check that cholesky is not called
with mock.patch.object(lazy_tensor, "cholesky") as chol_mock:
root_approx = lazy_tensor.root_inv_decomposition()
res = root_approx.matmul(test_mat)
actual = torch.linalg.solve(lazy_tensor.evaluate(), test_mat)
self.assertAllClose(res, actual, rtol=0.05, atol=0.02)
chol_mock.assert_not_called()
if __name__ == "__main__":
unittest.main()
| [
"torch.rand",
"torch.tensor"
] | 1.9 | Mehdishishehbor/gpytorch | 432e537b3f6679ea4ab3acf33b14626b7e161c92 |
1.2 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 13:01:15 2019
@author: WT
"""
import torch
import torch.nn as nn
### create masks for src & trg sequences
def create_masks(src, trg):
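    # positions equal to token id 1 (the padding token) are marked True so the transformer ignores them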
src_mask = (src == 1).unsqueeze(-2).bool()
if trg is not None:
trg_mask = (trg == 1).unsqueeze(-2).bool()
else:
trg_mask = None
src_mask = src_mask[:,0,:]
trg_mask = trg_mask[:,0,:]
return src_mask, trg_mask
def create_trg_mask(trg, cuda):
trg_mask = (trg == 1).unsqueeze(-2).bool()
trg_mask = trg_mask[:,0,:]
return trg_mask
class pyTransformer(nn.Module):
def __init__(self, src_vocab, trg_vocab, d_model, ff_dim, num, n_heads,\
max_encoder_len=80, max_decoder_len=80):
super(pyTransformer, self).__init__()
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.d_model = d_model
self.ff_dim = ff_dim
self.num = num
self.n_heads = n_heads
self.max_encoder_len = max_encoder_len
self.max_decoder_len = max_decoder_len
self.embed1 = nn.Embedding(src_vocab, d_model)
self.embed2 = nn.Embedding(trg_vocab, d_model)
self.transformer = nn.Transformer(d_model=d_model, nhead=n_heads, num_encoder_layers=num,\
num_decoder_layers=num, dim_feedforward=ff_dim, dropout=0.1)
self.fc1 = nn.Linear(d_model, trg_vocab)
def forward(self, src, trg, src_mask, trg_mask=None, infer=False, trg_vocab_obj=None):
#print(src[0,:], trg[0,:])
src = self.embed1(src)
trg = self.embed2(trg)
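        # nn.Transformer expects (seq_len, batch, d_model) inputs, so swap the batch and sequence dimensions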
src = src.permute(1,0,2)
trg = trg.permute(1,0,2)
out = self.transformer(src, trg, src_key_padding_mask=src_mask, \
tgt_key_padding_mask=trg_mask)
out = out.permute(1,0,2)
out = self.fc1(out)
#print(out.shape)
#print(out[0,:,:])
return out
@classmethod # src_vocab, trg_vocab, d_model, num, n_heads
def load_model(cls, path):
checkpoint = torch.load(path)
model = cls(src_vocab=checkpoint["src_vocab"], \
trg_vocab=checkpoint["trg_vocab"], \
d_model=checkpoint["d_model"], \
ff_dim=checkpoint["ff_dim"], \
num=checkpoint["num"], \
n_heads=checkpoint["n_heads"], \
max_encoder_len=checkpoint["max_encoder_len"], \
max_decoder_len=checkpoint["max_decoder_len"], \
)
model.load_state_dict(checkpoint['state_dict'])
return model
def save_state(self, epoch, optimizer, scheduler, best_acc, path):
state = {
'epoch': epoch + 1,\
'state_dict': self.state_dict(),\
'best_acc': best_acc,\
'optimizer' : optimizer.state_dict(),\
'scheduler' : scheduler.state_dict(),\
'src_vocab' : self.src_vocab,\
'trg_vocab': self.trg_vocab,\
'd_model': self.d_model,\
'ff_dim': self.ff_dim,\
'num': self.num,\
'n_heads': self.n_heads,\
'max_encoder_len': self.max_encoder_len,\
'max_decoder_len': self.max_decoder_len,
}
torch.save(state, path) | [
"torch.nn.Linear",
"torch.nn.Transformer",
"torch.save",
"torch.load",
"torch.nn.Embedding"
] | 1.2.0 | jackashore/NLP_Toolkit | e5bd8bcfad87f4906c45e66351adf93bd5c2727f |
1.9 | import torch
from environments import PendulumEnv, D4RLEnv
# Evaluate agent with deterministic policy π
def evaluate_agent(agent, num_episodes, env_type=PendulumEnv, env_name='', seed=1, return_trajectories=False, render=False):
env = env_type(env_name)
env.seed(seed)
returns, trajectories = [], []
if render: env.render() # PyBullet requires creating render window before first env reset, and then updates without requiring first call
with torch.inference_mode():
for _ in range(num_episodes):
states, actions, rewards = [], [], []
state, terminal = env.reset(), False
while not terminal:
action = agent.get_greedy_action(state) # Take greedy action
state, reward, terminal = env.step(action)
if return_trajectories:
states.append(state)
actions.append(action)
rewards.append(reward)
returns.append(sum(rewards))
if return_trajectories:
# Collect trajectory data (including terminal signal, which may be needed for offline learning)
terminals = torch.cat([torch.zeros(len(rewards) - 1), torch.ones(1)])
trajectories.append({'states': torch.cat(states), 'actions': torch.cat(actions), 'rewards': torch.tensor(rewards, dtype=torch.float32), 'terminals': terminals})
env.close()
return (returns, trajectories) if return_trajectories else returns
| [
"torch.cat",
"torch.inference_mode",
"torch.tensor",
"torch.ones"
] | 1.9 | wx-b/imitation-learning | 21d0663d4f350e7dd01a7843386965fd52e40a23 |
0.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.autograd import Variable
import numpy as np
# Universal import block
# Block to get the relative imports working
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import config
import prebuilt_loss_functions as plf
import loss_functions as lf
import utils.pytorch_utils as utils
import utils.image_utils as img_utils
import cifar10.cifar_loader as cifar_loader
import cifar10.cifar_resnets as cifar_resnets
import adversarial_attacks as aa
import adversarial_training as advtrain
import adversarial_evaluation as adveval
import utils.checkpoints as checkpoints
# Load up dataLoader, classifier, normer
use_gpu = torch.cuda.is_available()
classifier_net = cifar_loader.load_pretrained_cifar_resnet(flavor=32,
use_gpu=use_gpu)
classifier_net.eval()
val_loader = cifar_loader.load_cifar_data('val', normalize=False,
use_gpu=use_gpu)
cifar_normer = utils.DifferentiableNormalize(mean=config.CIFAR10_MEANS,
std=config.CIFAR10_STDS)
examples, labels = next(iter(val_loader))
# build loss fxn and attack object
loss_fxn = plf.VanillaXentropy(classifier_net, normalizer=cifar_normer)
spatial_attack = aa.SpatialPGDLp(classifier_net, cifar_normer, loss_fxn, 'inf')
outputs = spatial_attack.attack(examples, labels, 0.1, 20, verbose=True)
| [
"torch.cuda.is_available"
] | 0.4.0 | jonasnm/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 |
1.7 | """
This section covers the interface for `NERDA` models, that is
implemented as its own Python class [NERDA.models.NERDA][].
The interface enables you to easily
- specify your own [NERDA.models.NERDA][] model
- train it
- evaluate it
- use it to predict entities in new texts.
"""
from .datasets import get_conll_data
from .networks import NERDANetwork
from .predictions import predict, predict_text
from .performance import compute_f1_scores
from .training import train_model
import pandas as pd
import numpy as np
import torch
import os
import sys
import sklearn.preprocessing
from transformers import AutoModel, AutoTokenizer, AutoConfig
from typing import List
class NERDA:
"""NERDA model
A NERDA model object containing a complete model configuration.
The model can be trained with the `train` method. Afterwards
new observations can be predicted with the `predict` and
`predict_text` methods. The performance of the model can be
evaluated on a set of new observations with the
`evaluate_performance` method.
Examples:
Model for a VERY small subset (5 observations) of English NER data
        >>> from NERDA.datasets import get_conll_data
>>> trn = get_conll_data('train', 5)
>>> valid = get_conll_data('valid', 5)
        >>> tag_scheme = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC',
                          'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC']
>>> tag_outside = 'O'
>>> transformer = 'bert-base-multilingual-uncased'
>>> model = NERDA(transformer = transformer,
tag_scheme = tag_scheme,
tag_outside = tag_outside,
dataset_training = trn,
dataset_validation = valid)
Model for complete English NER data set CoNLL-2003 with modified hyperparameters
>>> trn = get_conll_data('train')
>>> valid = get_conll_data('valid')
>>> transformer = 'bert-base-multilingual-uncased'
>>> hyperparameters = {'epochs' : 3,
'warmup_steps' : 400,
'train_batch_size': 16,
                               'learning_rate': 0.0001}
>>> model = NERDA(transformer = transformer,
dataset_training = trn,
dataset_validation = valid,
tag_scheme = tag_scheme,
tag_outside = tag_outside,
dropout = 0.1,
hyperparameters = hyperparameters)
Attributes:
network (torch.nn.Module): network for Named Entity
Recognition task.
tag_encoder (sklearn.preprocessing.LabelEncoder): encoder for the
NER labels/tags.
transformer_model (transformers.PreTrainedModel): (Auto)Model derived from the
transformer.
transformer_tokenizer (transformers.PretrainedTokenizer): (Auto)Tokenizer
derived from the transformer.
transformer_config (transformers.PretrainedConfig): (Auto)Config derived from
the transformer.
train_losses (list): holds training losses, once the model has been
trained.
valid_loss (float): holds validation loss, once the model has been trained.
"""
def __init__(self,
transformer: str = 'bert-base-multilingual-uncased',
device: str = None,
tag_scheme: List[str] = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside: str = 'O',
dataset_training: dict = None,
dataset_validation: dict = None,
max_len: int = 128,
network: torch.nn.Module = NERDANetwork,
dropout: float = 0.1,
hyperparameters: dict = {'epochs' : 4,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 0.0001},
tokenizer_parameters: dict = {'do_lower_case' : True},
validation_batch_size: int = 8,
num_workers: int = 1) -> None:
"""Initialize NERDA model
Args:
transformer (str, optional): which pretrained 'huggingface'
transformer to use.
device (str, optional): the desired device to use for computation.
If not provided by the user, we take a guess.
tag_scheme (List[str], optional): All available NER
tags for the given data set EXCLUDING the special outside tag,
that is handled separately.
tag_outside (str, optional): the value of the special outside tag.
Defaults to 'O'.
dataset_training (dict, optional): the training data. Must consist
of 'sentences': word-tokenized sentences and 'tags': corresponding
NER tags. You can look at examples of, how the dataset should
look like by invoking functions get_dane_data() or get_conll_data().
Defaults to None, in which case the English CoNLL-2003 data set is used.
dataset_validation (dict, optional): the validation data. Must consist
of 'sentences': word-tokenized sentences and 'tags': corresponding
NER tags. You can look at examples of, how the dataset should
look like by invoking functions get_dane_data() or get_conll_data().
Defaults to None, in which case the English CoNLL-2003 data set
is used.
max_len (int, optional): the maximum sentence length (number of
tokens after applying the transformer tokenizer) for the transformer.
Sentences are truncated accordingly. Look at your data to get an
impression of, what could be a meaningful setting. Also be aware
that many transformers have a maximum accepted length. Defaults
to 128.
network (torch.nn.module, optional): network to be trained. Defaults
to a default generic `NERDANetwork`. Can be replaced with your own
customized network architecture. It must however take the same
arguments as `NERDANetwork`.
dropout (float, optional): dropout probability. Defaults to 0.1.
hyperparameters (dict, optional): Hyperparameters for the model. Defaults
to {'epochs' : 3, 'warmup_steps' : 500, 'train_batch_size': 16,
'learning_rate': 0.0001}.
tokenizer_parameters (dict, optional): parameters for the transformer
tokenizer. Defaults to {'do_lower_case' : True}.
validation_batch_size (int, optional): batch size for validation. Defaults
to 8.
num_workers (int, optional): number of workers for data loader.
"""
# set device automatically if not provided by user.
if device is None:
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Device automatically set to:", self.device)
else:
self.device = device
print("Device set to:", self.device)
self.tag_scheme = tag_scheme
self.tag_outside = tag_outside
self.transformer = transformer
self.dataset_training = dataset_training
self.dataset_validation = dataset_validation
self.hyperparameters = hyperparameters
self.tag_outside = tag_outside
self.tag_scheme = tag_scheme
tag_complete = [tag_outside] + tag_scheme
# fit encoder to _all_ possible tags.
self.max_len = max_len
self.tag_encoder = sklearn.preprocessing.LabelEncoder()
self.tag_encoder.fit(tag_complete)
self.transformer_model = AutoModel.from_pretrained(transformer)
self.transformer_tokenizer = AutoTokenizer.from_pretrained(transformer, **tokenizer_parameters)
self.transformer_config = AutoConfig.from_pretrained(transformer)
self.network = NERDANetwork(self.transformer_model, self.device, len(tag_complete), dropout = dropout)
self.network.to(self.device)
self.validation_batch_size = validation_batch_size
self.num_workers = num_workers
self.train_losses = []
self.valid_loss = np.nan
def train(self) -> str:
"""Train Network
Trains the network from the NERDA model specification.
Returns:
str: a message saying if the model was trained succesfully.
The network in the 'network' attribute is trained as a
side-effect. Training losses and validation loss are saved
in 'training_losses' and 'valid_loss'
attributes respectively as side-effects.
"""
network, train_losses, valid_loss = train_model(network = self.network,
tag_encoder = self.tag_encoder,
tag_outside = self.tag_outside,
transformer_tokenizer = self.transformer_tokenizer,
transformer_config = self.transformer_config,
dataset_training = self.dataset_training,
dataset_validation = self.dataset_validation,
validation_batch_size = self.validation_batch_size,
max_len = self.max_len,
device = self.device,
num_workers = self.num_workers,
**self.hyperparameters)
# attach as attributes to class
setattr(self, "network", network)
setattr(self, "train_losses", train_losses)
setattr(self, "valid_loss", valid_loss)
return "Model trained successfully"
def load_network_from_file(self, model_path = "model.bin") -> str:
"""Load Pretrained NERDA Network from file
Loads weights for a pretrained NERDA Network from file.
Args:
model_path (str, optional): Path for model file.
Defaults to "model.bin".
Returns:
str: message telling if weights for network were
loaded succesfully.
"""
# TODO: change assert to Raise.
assert os.path.exists(model_path), "File does not exist. You can download network with download_network()"
self.network.load_state_dict(torch.load(model_path, map_location = torch.device(self.device)))
return f'Weights for network loaded from {model_path}'
def predict(self, sentences: List[List[str]], **kwargs) -> List[List[str]]:
"""Predict Named Entities in Word-Tokenized Sentences
Predicts word-tokenized sentences with trained model.
Args:
sentences (List[List[str]]): word-tokenized sentences.
kwargs: arbitrary keyword arguments. For instance
'batch_size' and 'num_workers'.
Returns:
List[List[str]]: Predicted tags for sentences - one
predicted tag/entity per word token.
"""
return predict(network = self.network,
sentences = sentences,
transformer_tokenizer = self.transformer_tokenizer,
transformer_config = self.transformer_config,
max_len = self.max_len,
device = self.device,
tag_encoder = self.tag_encoder,
tag_outside = self.tag_outside,
**kwargs)
def predict_text(self, text: str, **kwargs) -> list:
"""Predict Named Entities in a Text
Args:
text (str): text to predict entities in.
kwargs: arbitrary keyword arguments. For instance
'batch_size' and 'num_workers'.
Returns:
tuple: word-tokenized sentences and predicted
tags/entities.
"""
return predict_text(network = self.network,
text = text,
transformer_tokenizer = self.transformer_tokenizer,
transformer_config = self.transformer_config,
max_len = self.max_len,
device = self.device,
tag_encoder = self.tag_encoder,
tag_outside = self.tag_outside,
**kwargs)
def evaluate_performance(self, dataset: dict, **kwargs) -> pd.DataFrame:
"""Evaluate Performance
Evaluates the performance of the model on an arbitrary
data set.
Args:
dataset (dict): Data set that must consist of
'sentences' and NER'tags'. You can look at examples
of, how the dataset should look like by invoking functions
get_dane_data() or get_conll_data().
kwargs: arbitrary keyword arguments for predict. For
instance 'batch_size' and 'num_workers'.
Returns:
DataFrame with performance numbers, F1-scores.
"""
tags_predicted = self.predict(dataset.get('sentences'),
**kwargs)
f1 = compute_f1_scores(y_pred = tags_predicted,
y_true = dataset.get('tags'),
labels = self.tag_scheme,
average = None)
# create DataFrame with performance scores (=F1)
df = list(zip(self.tag_scheme, f1[2]))
df = pd.DataFrame(df, columns = ['Level', 'F1-Score'])
# compute MICRO-averaged F1-scores and add to table.
f1_micro = compute_f1_scores(y_pred = tags_predicted,
y_true = dataset.get('tags'),
labels = self.tag_scheme,
average = 'micro')
f1_micro = pd.DataFrame({'Level' : ['AVG_MICRO'], 'F1-Score': [f1_micro[2]]})
df = df.append(f1_micro)
# compute MACRO-averaged F1-scores and add to table.
f1_macro = compute_f1_scores(y_pred = tags_predicted,
y_true = dataset.get('tags'),
labels = self.tag_scheme,
average = 'macro')
f1_macro = pd.DataFrame({'Level' : ['AVG_MACRO'], 'F1-Score': [f1_macro[2]]})
df = df.append(f1_macro)
return df
| [
"torch.cuda.is_available",
"torch.device"
] | 1.7.1 | Varun221/NERDA | a39900fd29c65465ac22f1a002c2eafef568258e |
1.7 | """
General utility functions
Author: Shengyu Huang
Last modified: 30.11.2020
"""
import os,re,sys,json,yaml,random, argparse, torch, pickle
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
_EPS = 1e-7 # To prevent division by zero
class Logger:
def __init__(self, path):
self.path = path
self.fw = open(self.path+'/log','a')
def write(self, text):
self.fw.write(text)
self.fw.flush()
def close(self):
self.fw.close()
def save_obj(obj, path ):
"""
save a dictionary to a pickle file
"""
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_obj(path):
"""
read a dictionary from a pickle file
"""
with open(path, 'rb') as f:
return pickle.load(f)
def load_config(path):
"""
Loads config file:
Args:
path (str): path to the config file
Returns:
config (dict): dictionary of the configuration parameters, merge sub_dicts
"""
with open(path,'r') as f:
cfg = yaml.safe_load(f)
config = dict()
for key, value in cfg.items():
for k,v in value.items():
config[k] = v
return config
def setup_seed(seed):
"""
fix random seed for deterministic training
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def square_distance(src, dst, normalised = False):
"""
Calculate Euclid distance between each two points.
Args:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Returns:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
if(normalised):
dist += 2
else:
dist += torch.sum(src ** 2, dim=-1)[:, :, None]
dist += torch.sum(dst ** 2, dim=-1)[:, None, :]
dist = torch.clamp(dist, min=1e-12, max=None)
return dist
def validate_gradient(model):
"""
Confirm all the gradients are non-nan and non-inf
"""
for name, param in model.named_parameters():
if param.grad is not None:
if torch.any(torch.isnan(param.grad)):
return False
if torch.any(torch.isinf(param.grad)):
return False
return True
def natural_key(string_):
"""
Sort strings by numbers in the name
"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] | [
"torch.cuda.manual_seed_all",
"torch.isnan",
"torch.clamp",
"torch.manual_seed",
"torch.isinf",
"torch.sum"
] | 1.7.1 | ShengyuH/PredateOverlap | 770c3063399f08b3836935212ab4c84d355b4704 |
1.7 | """ 3-d rigid body transformation group
"""
import torch
def identity(batch_size):
return torch.eye(3, 4)[None, ...].repeat(batch_size, 1, 1)
def inverse(g):
""" Returns the inverse of the SE3 transform
Args:
g: (B, 3/4, 4) transform
Returns:
(B, 3, 4) matrix containing the inverse
"""
# Compute inverse
rot = g[..., 0:3, 0:3]
trans = g[..., 0:3, 3]
inverse_transform = torch.cat([rot.transpose(-1, -2), rot.transpose(-1, -2) @ -trans[..., None]], dim=-1)
return inverse_transform
def concatenate(a, b):
"""Concatenate two SE3 transforms,
i.e. return a@b (but note that our SE3 is represented as a 3x4 matrix)
Args:
a: (B, 3/4, 4)
b: (B, 3/4, 4)
Returns:
(B, 3/4, 4)
"""
rot1 = a[..., :3, :3]
trans1 = a[..., :3, 3]
rot2 = b[..., :3, :3]
trans2 = b[..., :3, 3]
rot_cat = rot1 @ rot2
trans_cat = rot1 @ trans2[..., None] + trans1[..., None]
concatenated = torch.cat([rot_cat, trans_cat], dim=-1)
return concatenated
def transform(g, a, normals=None):
""" Applies the SE3 transform
Args:
g: SE3 transformation matrix of size ([1,] 3/4, 4) or (B, 3/4, 4)
a: Points to be transformed (N, 3) or (B, N, 3)
normals: (Optional). If provided, normals will be transformed
Returns:
transformed points of size (N, 3) or (B, N, 3)
"""
R = g[..., :3, :3] # (B, 3, 3)
p = g[..., :3, 3] # (B, 3)
if len(g.size()) == len(a.size()):
b = torch.matmul(a, R.transpose(-1, -2)) + p[..., None, :]
else:
raise NotImplementedError
b = R.matmul(a.unsqueeze(-1)).squeeze(-1) + p # No batch. Not checked
if normals is not None:
rotated_normals = normals @ R.transpose(-1, -2)
return b, rotated_normals
else:
return b
| [
"torch.cat",
"torch.eye"
] | 1.7.1 | ShengyuH/PredateOverlap | 770c3063399f08b3836935212ab4c84d355b4704 |
1.0 | import torch.nn as nn
import torch
class Yolo_head(nn.Module):
def __init__(self, nC, anchors, stride):
super(Yolo_head, self).__init__()
self.__anchors = anchors
self.__nA = len(anchors)
self.__nC = nC
self.__stride = stride
def forward(self, p):
bs, nG = p.shape[0], p.shape[-1]
p = p.view(bs, self.__nA, 5 + self.__nC + 25, nG, nG).permute(0, 3, 4, 1, 2)
p_de = self.__decode(p.clone())
return (p, p_de)
def __decode(self, p):
batch_size, output_size = p.shape[:2]
device = p.device
stride = self.__stride
anchors = (1.0 * self.__anchors).to(device)
conv_raw_dxdy = p[:, :, :, :, 0:2]
conv_raw_dwdh = p[:, :, :, :, 2:4]
conv_raw_conf = p[:, :, :, :, 4:5]
conv_raw_prob = p[:, :, :, :, 5:25]
conv_raw_ratio = p[:, :, :, :, 25:]
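        # build a grid of cell coordinates so the raw xy offsets can be turned into absolute positions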
y = torch.arange(0, output_size).unsqueeze(1).repeat(1, output_size)
x = torch.arange(0, output_size).unsqueeze(0).repeat(output_size, 1)
grid_xy = torch.stack([x, y], dim=-1)
grid_xy = grid_xy.unsqueeze(0).unsqueeze(3).repeat(batch_size, 1, 1, 3, 1).float().to(device)
pred_xy = (torch.sigmoid(conv_raw_dxdy) + grid_xy) * stride
pred_wh = (torch.exp(conv_raw_dwdh) * anchors) * stride
pred_xywh = torch.cat([pred_xy, pred_wh], dim=-1)
pred_conf = torch.sigmoid(conv_raw_conf)
pred_prob = torch.sigmoid(conv_raw_prob)
pred_ratio = torch.sigmoid(conv_raw_ratio)
pred_bbox = torch.cat([pred_xywh, pred_conf, pred_prob, pred_ratio], dim=-1)
return pred_bbox.view(-1, 5 + self.__nC + 25) if not self.training else pred_bbox
| [
"torch.sigmoid",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.exp"
] | 1.0.0 | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 |
1.5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dropblock import DropBlock2D
class BEV_Unet(nn.Module):
def __init__(self,n_class,n_height,dilation = 1,group_conv=False,input_batch_norm = False,dropout = 0.,circular_padding = False, dropblock = True):
super(BEV_Unet, self).__init__()
self.n_class = n_class
self.n_height = n_height
self.network = UNet(n_class*n_height,n_height,dilation,group_conv,input_batch_norm,dropout,circular_padding,dropblock)
def forward(self, x):
x = self.network(x)
x = x.permute(0,2,3,1)
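        # unflatten the fused (n_class * n_height) channels into separate height and class dims, then move class to dim 1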
new_shape = list(x.size())[:3] + [self.n_height,self.n_class]
x = x.view(new_shape)
x = x.permute(0,4,1,2,3)
return x
class UNet(nn.Module):
def __init__(self, n_class,n_height,dilation,group_conv,input_batch_norm, dropout,circular_padding,dropblock):
super(UNet, self).__init__()
self.inc = inconv(n_height, 64, dilation, input_batch_norm, circular_padding)
self.down1 = down(64, 128, dilation, group_conv, circular_padding)
self.down2 = down(128, 256, dilation, group_conv, circular_padding)
self.down3 = down(256, 512, dilation, group_conv, circular_padding)
self.down4 = down(512, 512, dilation, group_conv, circular_padding)
self.up1 = up(1024, 256, circular_padding, group_conv = group_conv, use_dropblock=dropblock, drop_p=dropout)
self.up2 = up(512, 128, circular_padding, group_conv = group_conv, use_dropblock=dropblock, drop_p=dropout)
self.up3 = up(256, 64, circular_padding, group_conv = group_conv, use_dropblock=dropblock, drop_p=dropout)
self.up4 = up(128, 64, circular_padding, group_conv = group_conv, use_dropblock=dropblock, drop_p=dropout)
self.dropout = nn.Dropout(p=0. if dropblock else dropout)
self.outc = outconv(64, n_class)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(self.dropout(x))
return x
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch,group_conv,dilation=1):
super(double_conv, self).__init__()
if group_conv:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1,groups = min(out_ch,in_ch)),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1,groups = out_ch),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class double_conv_circular(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch,group_conv,dilation=1):
super(double_conv_circular, self).__init__()
if group_conv:
self.conv1 = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=(1,0),groups = min(out_ch,in_ch)),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(out_ch, out_ch, 3, padding=(1,0),groups = out_ch),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
else:
self.conv1 = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=(1,0)),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(out_ch, out_ch, 3, padding=(1,0)),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
# add circular (wrap-around) padding along the width dimension; the height dimension
# is zero-padded by the convolutions themselves via padding=(1,0)
x = F.pad(x,(1,1,0,0),mode = 'circular')
x = self.conv1(x)
x = F.pad(x,(1,1,0,0),mode = 'circular')
x = self.conv2(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch, dilation, input_batch_norm, circular_padding):
super(inconv, self).__init__()
if input_batch_norm:
if circular_padding:
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
double_conv_circular(in_ch, out_ch,group_conv = False,dilation = dilation)
)
else:
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
double_conv(in_ch, out_ch,group_conv = False,dilation = dilation)
)
else:
if circular_padding:
self.conv = double_conv_circular(in_ch, out_ch,group_conv = False,dilation = dilation)
else:
self.conv = double_conv(in_ch, out_ch,group_conv = False,dilation = dilation)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch, dilation, group_conv, circular_padding):
super(down, self).__init__()
if circular_padding:
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv_circular(in_ch, out_ch,group_conv = group_conv,dilation = dilation)
)
else:
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch,group_conv = group_conv,dilation = dilation)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, circular_padding, bilinear=True, group_conv=False, use_dropblock = False, drop_p = 0.5):
super(up, self).__init__()
# it would be a nice idea if the upsampling could be learned too,
# but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
elif group_conv:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2,groups = in_ch//2)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
if circular_padding:
self.conv = double_conv_circular(in_ch, out_ch,group_conv = group_conv)
else:
self.conv = double_conv(in_ch, out_ch,group_conv = group_conv)
self.use_dropblock = use_dropblock
if self.use_dropblock:
self.dropblock = DropBlock2D(block_size=7, drop_prob=drop_p)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,
diffY // 2, diffY - diffY//2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
if self.use_dropblock:
x = self.dropblock(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x | [
"torch.nn.Dropout",
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.functional.pad"
] | 1.5.0 | isunLt/PolarSeg | 50b6df8d0a63aae1835377178baeaeb071b8f78d |
1.8 | import torch
import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.mip_utils as mip_utils
import neural_network_lyapunov.relu_to_optimization as relu_to_optimization
class ControlAffineSystemConstraintReturn:
"""
The return type of
ControlPiecewiseAffineSystem::mixed_integer_constraints() function.
"""
def __init__(self):
# A MixedIntegerConstraintsReturn object with f as the output.
self.mip_cnstr_f = None
# A MixedIntegerConstraintsReturn object with the flat vector
# G.reshape((-1,)) as the output.
self.mip_cnstr_G = None
# The lower bound of f.
self.f_lo = None
# The upper bound of f.
self.f_up = None
# The lower bound of the flat vector G.reshape((-1,))
self.G_flat_lo = None
# The upper bound of the flat vector G.reshape((-1,))
self.G_flat_up = None
class ControlPiecewiseAffineSystem:
"""
Represent a continuous-time control-affine system
ẋ=f(x)+G(x)u
u_lo <= u <= u_up
Notice that the dynamics ẋ is an affine function of u.
We will assume that given u, ẋ is a (piecewise) affine function of x,
hence we can write the dynamics constraint using (mixed-integer) linear
constraints.
"""
def __init__(self, x_lo: torch.Tensor, x_up: torch.Tensor,
u_lo: torch.Tensor, u_up: torch.Tensor):
"""
Args:
x_lo, x_up: We will constrain the state to be within the box
x_lo <= x <= x_up.
u_lo, u_up: The input limits.
"""
assert (len(x_lo.shape) == 1)
assert (x_lo.shape == x_up.shape)
assert (torch.all(x_lo <= x_up))
self.x_lo = x_lo
self.x_up = x_up
assert (len(u_lo.shape) == 1)
assert (u_lo.shape == u_up.shape)
assert (torch.all(u_lo <= u_up))
self.u_lo = u_lo
self.u_up = u_up
@property
def dtype(self):
return self.x_lo.dtype
@property
def x_dim(self):
return self.x_lo.numel()
@property
def u_dim(self):
return self.u_lo.numel()
@property
def x_lo_all(self):
return self.x_lo.detach().numpy()
@property
def x_up_all(self):
return self.x_up.detach().numpy()
def mixed_integer_constraints(self) -> ControlAffineSystemConstraintReturn:
"""
Returns the mixed-integer linear constraints on f(x) and G(x), together
with the bounds on f(x) and G(x)
"""
raise NotImplementedError
def dynamics(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
"""
Computes ẋ=f(x)+G(x) * clamp(u, u_lo, u_up)
"""
u_clamped = torch.max(torch.min(u, self.u_up), self.u_lo)
return self.f(x) + self.G(x) @ u_clamped
def f(self, x):
"""
The dynamics is ẋ=f(x)+G(x)u
"""
raise NotImplementedError
def G(self, x):
"""
The dynamics is ẋ=f(x)+G(x)u
"""
raise NotImplementedError
class LinearSystem(ControlPiecewiseAffineSystem):
"""
A linear system ẋ = A*x+B*u
We create this system to test synthesizing control Lyapunov functions.
"""
def __init__(self, A: torch.Tensor, B: torch.Tensor, x_lo: torch.Tensor,
x_up: torch.Tensor, u_lo: torch.Tensor, u_up: torch.Tensor):
super(LinearSystem, self).__init__(x_lo, x_up, u_lo, u_up)
assert (A.shape == (self.x_dim, self.x_dim))
assert (B.shape == (self.x_dim, self.u_dim))
self.A = A
self.B = B
def mixed_integer_constraints(self):
# f = A*x
# G = B
ret = ControlAffineSystemConstraintReturn()
ret.mip_cnstr_f = gurobi_torch_mip.MixedIntegerConstraintsReturn()
ret.mip_cnstr_f.Aout_input = self.A
ret.mip_cnstr_G = gurobi_torch_mip.MixedIntegerConstraintsReturn()
ret.mip_cnstr_G.Cout = self.B.reshape((-1, ))
# Independent of method (IA/MIP/LP), they all compute the same range
# for f=A*x.
ret.f_lo, ret.f_up = mip_utils.compute_range_by_IA(
self.A, torch.zeros((self.x_dim, ), dtype=self.dtype), self.x_lo,
self.x_up)
ret.G_flat_lo = self.B.reshape((-1, ))
ret.G_flat_up = self.B.reshape((-1, ))
return ret
def f(self, x):
return self.A @ x
def G(self, x):
return self.B
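# Minimal usage sketch (the double-integrator numbers below are illustrative, not from this repo):
# dtype = torch.float64
# A = torch.tensor([[0., 1.], [0., 0.]], dtype=dtype)
# B = torch.tensor([[0.], [1.]], dtype=dtype)
# sys = LinearSystem(A, B,
#                    x_lo=torch.tensor([-1., -1.], dtype=dtype), x_up=torch.tensor([1., 1.], dtype=dtype),
#                    u_lo=torch.tensor([-10.], dtype=dtype), u_up=torch.tensor([10.], dtype=dtype))
# xdot = sys.dynamics(torch.tensor([0.5, 0.], dtype=dtype), torch.tensor([2.], dtype=dtype))  # A@x + B@u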
class SecondOrderControlAffineSystem(ControlPiecewiseAffineSystem):
"""
A second-order system
q̇ = v
v̇ = a(x) + b(x)u
where the state is x = [q, v].
"""
def __init__(self, x_lo, x_up, u_lo, u_up):
super(SecondOrderControlAffineSystem,
self).__init__(x_lo, x_up, u_lo, u_up)
assert (self.x_dim % 2 == 0)
self.nq = int(self.x_dim / 2)
def a(self, x):
raise NotImplementedError
def b(self, x):
raise NotImplementedError
def f(self, x):
v = x[self.nq:]
return torch.cat((v, self.a(x)))
def G(self, x):
return torch.vstack((torch.zeros((self.nq, self.u_dim),
dtype=self.dtype), self.b(x)))
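# In this second-order structure f(x) = [v; a(x)] and G(x) = [0; b(x)], so the q̇ rows of the
# dynamics are known exactly (identity on v) and only the v̇ rows carry learned/encoded terms.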
def _mixed_integer_constraints_v(self):
"""
Return the mixed-integer constraints on a(x) and b(x).reshape((-1,)).
"""
raise NotImplementedError
def mixed_integer_constraints(self):
mip_cnstr_a, mip_cnstr_b_flat, a_lo, a_up, b_lo, b_up = \
self._mixed_integer_constraints_v()
# We want mip_cnstr_f to be the same as mip_cnstr_a, except for the
# output constraint.
ret = ControlAffineSystemConstraintReturn()
ret.mip_cnstr_f = gurobi_torch_mip.MixedIntegerConstraintsReturn()
for field in mip_cnstr_a.__dict__.keys():
if field not in ("Aout_input", "Aout_slack", "Aout_binary",
"Cout"):
ret.mip_cnstr_f.__dict__[field] = mip_cnstr_a.__dict__[field]
def get_tensor(tensor, row, col):
# Return @p tensor if @p tensor is not None
# else return torch.zeros((row, col))
return tensor if tensor is not None else torch.zeros(
(row, col), dtype=self.dtype)
ret.mip_cnstr_f.Aout_input = torch.vstack(
(torch.hstack((torch.zeros((self.nq, self.nq), dtype=self.dtype),
torch.eye(self.nq, dtype=self.dtype))),
get_tensor(mip_cnstr_a.Aout_input, self.nq, self.x_dim)))
if mip_cnstr_a.Aout_slack is not None:
ret.mip_cnstr_f.Aout_slack = torch.vstack(
(torch.zeros((self.nq, mip_cnstr_a.Aout_slack.shape[1]),
dtype=self.dtype), mip_cnstr_a.Aout_slack))
if mip_cnstr_a.Aout_binary is not None:
ret.mip_cnstr_f.Aout_binary = torch.vstack(
(torch.zeros((self.nq, mip_cnstr_a.Aout_binary.shape[1]),
dtype=self.dtype), mip_cnstr_a.Aout_binary))
if mip_cnstr_a.Cout is not None:
ret.mip_cnstr_f.Cout = torch.cat(
(torch.zeros(self.nq, dtype=self.dtype), mip_cnstr_a.Cout))
ret.mip_cnstr_G = gurobi_torch_mip.MixedIntegerConstraintsReturn()
for field in mip_cnstr_b_flat.__dict__.keys():
if field not in ("Aout_input", "Aout_slack", "Aout_binary",
"Cout"):
ret.mip_cnstr_G.__dict__[field] = mip_cnstr_b_flat.__dict__[
field]
if mip_cnstr_b_flat.Aout_input is not None:
ret.mip_cnstr_G.Aout_input = torch.vstack(
(torch.zeros((self.nq * self.u_dim, self.nq),
dtype=self.dtype), mip_cnstr_b_flat.Aout_input))
if mip_cnstr_b_flat.Aout_slack is not None:
ret.mip_cnstr_G.Aout_slack = torch.vstack((torch.zeros(
(self.nq * self.u_dim, mip_cnstr_b_flat.Aout_slack.shape[1]),
dtype=self.dtype), mip_cnstr_b_flat.Aout_slack))
if mip_cnstr_b_flat.Aout_binary is not None:
ret.mip_cnstr_G.Aout_binary = torch.vstack((torch.zeros(
(self.nq * self.u_dim, mip_cnstr_b_flat.Aout_binary.shape[1]),
dtype=self.dtype), mip_cnstr_b_flat.Aout_binary))
if mip_cnstr_b_flat.Cout is not None:
ret.mip_cnstr_G.Cout = torch.cat(
(torch.zeros(self.nq * self.u_dim,
dtype=self.dtype), mip_cnstr_b_flat.Cout))
ret.f_lo = torch.cat((self.x_lo[self.nq:], a_lo))
ret.f_up = torch.cat((self.x_up[self.nq:], a_up))
ret.G_flat_lo = torch.cat((torch.zeros(self.nq * self.u_dim,
dtype=self.dtype), b_lo))
ret.G_flat_up = torch.cat((torch.zeros(self.nq * self.u_dim,
dtype=self.dtype), b_up))
return ret
class ReluSecondOrderControlAffineSystem(SecondOrderControlAffineSystem):
"""
A second order system, that
v̇=ϕ_a(x) + ϕ_b(x) * u
where ϕ_a, ϕ_b are both neural networks with (leaky) ReLU activation unit.
"""
def __init__(self, x_lo, x_up, u_lo, u_up, phi_a, phi_b,
method: mip_utils.PropagateBoundsMethod):
"""
Args:
phi_a: A neural network that maps x to ϕ_a(x).
phi_b: A neural network that maps x to a flat vector
[ϕ_b(x).row(0), ϕ_b(x).row(1), ..., ϕ_b(x).row(nq-1)]
method: The method to propagate the bounds in the ReLU networks phi_a
and phi_b.
"""
super(ReluSecondOrderControlAffineSystem,
self).__init__(x_lo, x_up, u_lo, u_up)
assert (phi_a[0].in_features == self.x_dim)
assert (phi_a[-1].out_features == self.nq)
assert (phi_b[0].in_features == self.x_dim)
assert (phi_b[-1].out_features == self.u_dim * self.nq)
self.phi_a = phi_a
self.phi_b = phi_b
self.method = method
self.relu_free_pattern_a = relu_to_optimization.ReLUFreePattern(
self.phi_a, self.dtype)
self.relu_free_pattern_b = relu_to_optimization.ReLUFreePattern(
self.phi_b, self.dtype)
def a(self, x):
return self.phi_a(x)
def b(self, x):
return self.phi_b(x).reshape((self.nq, self.u_dim))
def _mixed_integer_constraints_v(self):
mip_cnstr_a = self.relu_free_pattern_a.output_constraint(
self.x_lo, self.x_up, self.method)
mip_cnstr_b_flat = self.relu_free_pattern_b.output_constraint(
self.x_lo, self.x_up, self.method)
a_lo = mip_cnstr_a.nn_output_lo
a_up = mip_cnstr_a.nn_output_up
b_lo = mip_cnstr_b_flat.nn_output_lo
b_up = mip_cnstr_b_flat.nn_output_up
return mip_cnstr_a, mip_cnstr_b_flat, a_lo, a_up, b_lo, b_up
def train_control_affine_forward_model(forward_model_f,
forward_model_G,
x_equ,
u_equ,
model_dataset,
num_epochs,
lr,
batch_size=50,
verbose=True):
"""
Helper function to train neural networks that approximate continuous time
dynamics as
ẋ = ϕ_f(x) + ϕ_G(x) u - ϕ_f(x*) - ϕ_G(x*) u*
@param forward_model_f Feedforward network ϕ_f
@param forward_model_G Feedforward network ϕ_G
@param x_equ Tensor shape (x_dim,) with the system equilibrium state
@param u_equ Tensor shape (u_dim,) with the system equilibrium control
@param model_dataset TensorDataset with data, label = ([x|u], ẋ)
@param num_epochs int number of training epochs
@param lr float learning rate
@param batch_size int
"""
x_dim = x_equ.shape[0]
u_dim = u_equ.shape[0]
def compute_x_dot(forward_model_f, network_input, forward_model_G):
x, u = torch.split(network_input, [x_dim, u_dim], dim=1)
x_dot = forward_model_f(x) +\
(forward_model_G(x).view((x.shape[0], x_dim, u_dim)) @
u.unsqueeze(2)).squeeze(2) -\
forward_model_f(x_equ) -\
forward_model_G(x_equ).view((x_dim, u_dim)) @ u_equ
return x_dot
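# Subtracting phi_f(x*) + phi_G(x*) u* above pins the learned model so that x_dot is exactly
# zero at the equilibrium (x_equ, u_equ), independent of the networks' bias terms.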
utils.train_approximator(
model_dataset,
forward_model_f,
compute_x_dot,
batch_size=batch_size,
num_epochs=num_epochs,
lr=lr,
additional_variable=list(forward_model_G.parameters()),
output_fun_args=dict(forward_model_G=forward_model_G),
verbose=verbose)
| [
"torch.zeros",
"torch.cat",
"torch.min",
"torch.split",
"torch.all",
"torch.eye"
] | 1.8 | StanfordASL/neural-network-lyapunov | 9e5db1c7f91b42df729026c9aa8575bc126f66b6 |
1.0 | import math
import torch
import torch.optim as optim
import horovod.torch as hvd
import numpy as np
from horovod.torch.mpi_ops import allgather_async
from legacy.utils import (ComputeA, ComputeG)
from legacy.utils import update_running_avg
from legacy.utils import try_contiguous
from legacy.utils import cycle
from legacy.utils import get_block_boundary
from legacy.utils import sparsification
import logging
import tcmm
import time
import torchsso
logger = logging.getLogger()
estimate_time_A = [0.00013175010681152344, 0.0011579513549804688, 0.0011622905731201172, 0.001163339614868164, 0.0011631011962890624, 0.0011394977569580077, 0.0008266210556030273, 0.000829005241394043, 0.0008294343948364258, 0.0008281707763671875, 0.0008249759674072265, 0.0008289337158203125, 0.0008284330368041992, 0.0008333921432495117, 0.0008373737335205078, 0.0008400678634643555, 0.0008365631103515625, 0.0008355617523193359, 0.000834512710571289, 0.0008332252502441407, 0.006051778793334961, 0.006056976318359375, 0.006056952476501465, 0.006049537658691406, 0.006057143211364746, 0.006056356430053711, 0.006053018569946289, 0.006051158905029297, 0.006050491333007812, 0.006055474281311035, 0.006048965454101563, 0.006051397323608399, 0.006054568290710449, 0.0060559272766113285, 0.006066560745239258, 0.006073403358459473, 0.006061959266662598, 0.006053304672241211, 0.03182971477508545, 0.03203625679016113, 0.032034444808959964, 0.03211054801940918, 0.032068943977355956, 0.032073044776916505, 0.03207738399505615, 0.032068395614624025, 0.03203463554382324, 0.03205530643463135, 0.03205585479736328, 0.032103443145751955, 0.03206741809844971, 0.032056117057800294, 0.032047080993652347, 0.032123994827270505, 0.03212919235229492, 0.0003176212310791016]
estimate_time_G = [0.00015666484832763672, 0.00014612674713134765, 0.00014829635620117188, 0.00016701221466064453, 0.00023555755615234375, 0.00023458003997802734, 0.00023586750030517577, 0.00023624897003173828, 0.00014681816101074218, 0.00014879703521728516, 0.00014846324920654298, 0.00014934539794921874, 0.0001502513885498047, 0.00014581680297851563, 0.00014772415161132813, 0.00014641284942626954, 0.0001462697982788086, 0.00014600753784179687, 0.00014696121215820312, 0.00018224716186523437, 0.000179290771484375, 0.0001822948455810547, 0.0001821279525756836, 0.00017876625061035155, 0.00017430782318115235, 0.0001788616180419922, 0.00018439292907714843, 0.00016841888427734374, 0.00018928050994873046, 0.00018115043640136718, 0.00017838478088378907, 0.0001840353012084961, 0.00021533966064453126, 0.0001862049102783203, 0.0001873493194580078, 0.00019392967224121093, 0.00018782615661621093, 0.0002820253372192383, 0.0002731800079345703, 0.00027272701263427737, 0.0002585411071777344, 0.000267481803894043, 0.0002699851989746094, 0.00027697086334228517, 0.0002799272537231445, 0.00028808116912841796, 0.00027093887329101565, 0.0002554655075073242, 0.00030405521392822265, 0.00027341842651367186, 0.0002665519714355469, 0.00025577545166015624, 0.00025708675384521483, 0.0002652406692504883, 0.0002630710601806641, 0.00010921955108642579]
class KFAC(optim.Optimizer):
"""KFAC Distributed Gradient Preconditioner
Computes the natural gradient of a model in place with a layer-wise
FIM approximation. Layer computations are distributed across workers
using Horovod.
Usage:
optimizer = optim.SGD(model.parameters(), ...)
optimizer = hvd.DistributedOptimizer(optimizer, ...)
preconditioner = KFAC(model, ...)
...
for i, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.synchronize()
preconditioner.step()
with optimizer.skip_synchronize():
optimizer.step()
Args:
model (nn): Torch model to precondition
lr (float, optional): learning rate (default: 0.1)
factor_decay (float, optional): running average coefficient for Kronecker
factors (default: 0.95)
damping (float, optional): Tikhonov damping parameter (default: 0.001)
kl_clip (float, optional): clipping parameter for gradient scaling
(default: 0.001)
fac_update_freq (int, optional): iterations between calculating and
updating the running average of the Kronecker factors (default: 10)
kfac_update_freq (int, optional): iterations between applying gradient
preconditioning (default: 100)
batch_averaged (bool, optional): boolean representing whether the gradient
is already averaged across the batches (default: True)
diag_blocks (int, optional): Experimental: number of diagonal blocks to
approximate the Kronecker factor eigendecomposition with.
`diag_blocks=1` computes the eigendecomposition of the entire factor
(default: 1)
diag_warmup (int, optional): number of epochs to wait before starting
the block diagonal factor approximation (default: 0)
distribute_layer_factors (bool, optional): if `True`, computes factors A
and G on different workers else computes A and G for a single layer
on the same worker. If `None`, determines best value based on layer
count (default: None)
"""
def __init__(self,
model,
lr=0.1,
hook_enabled=True,
factor_decay=0.95,
damping=0.001,
kl_clip=0.001,
fac_update_freq=10,
kfac_update_freq=100,
batch_averaged=True,
diag_blocks=1,
diag_warmup=0,
distribute_layer_factors=None,
sparse=False,
sparse_ratio=0.01,
exclude_parts=''):
#exclude_parts='CommunicateInverse,ComputeInverse,CommunicateFactor,ComputeFactor'):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < factor_decay <= 1:
raise ValueError("Invalid factor decay rate: {}".format(factor_decay))
if not 0.0 < damping:
raise ValueError("Invalid damping: {}".format(damping))
if not 0.0 < kl_clip:
raise ValueError("Invalid clipping value: {}".format(kl_clip))
if not 0 < fac_update_freq:
raise ValueError("Invalid factor update frequency: {}".format(fac_update_freq))
if not 0 < kfac_update_freq:
raise ValueError("Invalid K-FAC update frequency: {}".format(kfac_update_freq))
if not 0 == kfac_update_freq % fac_update_freq:
print("WARNING: it is suggested that kfac_update_freq be a multiple of fac_update_freq")
if not 0 < diag_blocks:
raise ValueError("Invalid diagonal block approx count: {}".format(diag_blocks))
if not 0 <= diag_blocks:
raise ValueError("Invalid diagonal block approx count: {}".format(diag_blocks))
if not 1 == diag_blocks:
print("WARNING: diag_blocks > 1 is experimental and may give poor results.")
# For compatibility with `KFACParamScheduler`
defaults = dict(lr=lr,
damping=damping,
fac_update_freq=fac_update_freq,
kfac_update_freq=kfac_update_freq)
super(KFAC, self).__init__(model.parameters(), defaults)
self.computeA = ComputeA()
self.computeG = ComputeG()
self.known_modules = {'Linear', 'Conv2d'}
self.modules = []
self.module_names = []
# register hooks for known modules
self.hook_enabled = hook_enabled
self._register_modules(model)
# tcmm communicator
self.communicator = tcmm.Communicator(hvd.rank(), hvd.size(), 1)
self.steps = 0
# Dictionaries keyed by `module` to storing the factors and inverse factors
self.m_a, self.m_g = {}, {}
self.m_A, self.m_G = {}, {}
self.m_inv_A, self.m_inv_G = {}, {}
self.module_ranks = None
self.sparse = sparse
self.sparse_ratio = sparse_ratio
self.residualsA, self.residualsG = {}, {}
self.factor_decay = factor_decay
self.kl_clip = kl_clip
self.fac_update_freq = fac_update_freq
self.kfac_update_freq = kfac_update_freq
self.diag_blocks = diag_blocks
self.diag_warmup = diag_warmup
self.batch_averaged = batch_averaged
self.exclude_communicate_inverse = True if exclude_parts.find('CommunicateInverse') >=0 else False
self.exclude_compute_inverse = True if exclude_parts.find('ComputeInverse') >=0 else False
self.exclude_communicate_factor = True if exclude_parts.find('CommunicateFactor') >=0 else False
self.exclude_compute_factor = True if exclude_parts.find('ComputeFactor') >=0 else False
# Compute ideal value for `distribute_layer_factors` based on
# registered module count
if distribute_layer_factors is None:
self.distribute_layer_factors = True \
if hvd.size() > len(self.modules) else False
else:
self.distribute_layer_factors = distribute_layer_factors
self.eps = 1e-10 # for numerical stability
self.rank_iter = cycle(list(range(hvd.size())))
def set_hook_enabled(self, mode=True):
self.hook_enabled = mode
def _save_input(self, module, input):
"""Hook for saving layer input"""
if self.hook_enabled and torch.is_grad_enabled() and self.steps % self.fac_update_freq == 0:
self.m_a[module] = input[0].data
def _save_grad_output(self, module, grad_input, grad_output):
"""Hook for saving gradient w.r.t output"""
if self.hook_enabled and self.steps % self.fac_update_freq == 0:
self.m_g[module] = grad_output[0].data
def _register_modules(self, model):
"""Register hooks to all supported layers in the model"""
name_idx = 0
for module in model.modules():
classname = module.__class__.__name__
if classname in self.known_modules:
self.modules.append(module)
module.register_forward_pre_hook(self._save_input)
module.register_backward_hook(self._save_grad_output)
module_name = 'module_name_%s_%d' % (classname, name_idx)
self.module_names.append(module_name)
name_idx += 1
def _init_A(self, factor, module):
"""Initialize memory for factor A and its inverse"""
self.m_A[module] = torch.diag(factor.new_ones(factor.shape[0]))
self.m_inv_A[module] = factor.new_zeros(factor.shape)
def _update_module_A(self, module):
A = self.computeA(self.m_a[module], module)
if self.steps == 0:
self._init_A(A, module)
update_running_avg(A, self.m_A[module], self.factor_decay)
def _update_A(self):
"""Compute and update factor A for all modules"""
for module in self.modules:
self._update_module_A(module)
def _init_G(self, factor, module):
"""Initialize memory for factor G and its eigendecomp"""
self.m_G[module] = torch.diag(factor.new_ones(factor.shape[0]))
self.m_inv_G[module] = factor.new_zeros(factor.shape)
def _update_module_G(self, module):
G = self.computeG(self.m_g[module], module, self.batch_averaged)
if self.steps == 0:
self._init_G(G, module)
update_running_avg(G, self.m_G[module], self.factor_decay)
def _update_G(self):
"""Compute and update factor G for all modules"""
for module in self.modules:
self._update_module_G(module)
def _add_value_to_diagonal(self, X, value):
return X.add_(torch.diag(X.new(X.shape[0]).fill_(value)))
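# i.e. returns X + value * I in place; used below to apply Tikhonov damping before inversion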
def _update_inverse_A(self, module, rank):
"""Compute inverse of A for module on specified worker"""
if hvd.rank() == rank:
block = self._add_value_to_diagonal(self.m_A[module], self.damping)
self.m_inv_A[module] = torchsso.utils.inv(block)
def _update_inverse_G(self, module, rank):
"""Compute inverse of G for module on specified worker"""
if hvd.rank() == rank:
block = self._add_value_to_diagonal(self.m_G[module], self.damping)
self.m_inv_G[module] = torchsso.utils.inv(block)
def _get_grad(self, module):
"""Get formated gradient of module
Args:
module: module/layer to get gradient of
Returns:
Formatted gradient with shape [output_dim, input_dim] for module
"""
if module.__class__.__name__ == 'Conv2d':
# n_filters * (in_c * kw * kh)
grad = module.weight.grad.data.view(module.weight.grad.data.size(0), -1)
else:
grad = module.weight.grad.data
if module.bias is not None:
grad = torch.cat([grad, module.bias.grad.data.view(-1, 1)], 1)
return grad
def _get_preconditioned_grad(self, module, grad):
"""Precondition gradient of module
Args:
module: module to compute preconditioned gradient for
grad: formatted gradient from `_get_grad()`
Returns:
preconditioned gradient with same shape as `grad`
"""
v = self.m_inv_G[module] @ grad @ self.m_inv_A[module]
if module.bias is not None:
v = [v[:, :-1], v[:, -1:]]
v[0] = v[0].view(module.weight.grad.data.size()) # weight
v[1] = v[1].view(module.bias.grad.data.size()) # bias
else:
v = [v.view(module.weight.grad.data.size())]
return v
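# The product above uses the Kronecker identity (A ⊗ G)^{-1} vec(grad) = vec(G^{-1} grad A^{-1})
# (up to the vec convention), so two small inverses stand in for inverting the full per-layer Fisher block.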
def _update_scale_grad(self, updates):
"""Update the gradients in place and scale
Updates the gradients in-place for all modules using the preconditioned
gradients and scales the gradients.
Args:
updates (dict): dict of {module: precon_grad}
"""
vg_sum = 0
for module in self.modules:
v = updates[module]
vg_sum += (v[0] * module.weight.grad.data * self.lr ** 2).sum().item()
if module.bias is not None:
vg_sum += (v[1] * module.bias.grad.data * self.lr ** 2).sum().item()
if self.exclude_communicate_inverse:
nu = 1
else:
nu = min(1.0, math.sqrt(self.kl_clip / abs(vg_sum)))
for module in self.modules:
v = updates[module]
module.weight.grad.data.copy_(v[0])
module.weight.grad.data.mul_(nu)
if module.bias is not None:
module.bias.grad.data.copy_(v[1])
module.bias.grad.data.mul_(nu)
def step(self, closure=None, epoch=None):
"""Perform one K-FAC step
Note:
- this function should always be called before `optimizer.step()`
- gradients must be averaged across ranks before calling `step()`
Args:
closure: for compatibility with the base optimizer class.
`closure` is ignored by KFAC
epoch (int, optional): epoch to use for determining when to end
the `diag_warmup` period. `epoch` is not necessary if not using
`diag_warmup`
"""
# Update params, used for compatibility with `KFACParamScheduler`
group = self.param_groups[0]
self.lr = group['lr']
self.damping = group['damping']
self.fac_update_freq = group['fac_update_freq']
self.kfac_update_freq = group['kfac_update_freq']
updates = {}
handles = []
if self.steps % self.fac_update_freq == 0:
if not self.exclude_compute_factor:
self._update_A()
self._update_G()
# assign factors on workers to compute inverse
self._generate_module_ranks()
#self._generate_uniform_ranks()
if self.steps % self.fac_update_freq == 0:
if not self.exclude_communicate_factor:
if hvd.size() > 1:
#self._reduce_symmetric_factors()
self._reduce_factors()
#self._allreduce_factors()
if self.steps % self.kfac_update_freq == 0:
stime = time.time()
for i, module in enumerate(self.modules):
rank_a, rank_g = self.module_ranks[module]
if not self.exclude_compute_inverse:
self._update_inverse_A(module, rank_a)
self._update_inverse_G(module, rank_g)
#logger.info("Step: inverse comp time %s on worker %s", time.time()-stime, hvd.rank())
if not self.exclude_communicate_inverse:
if hvd.size() > 1:
self._broadcast_inverse_factors()
#logger.info("Step: inverse comp+comm time %s on worker %s", time.time()-stime, hvd.rank())
for i, module in enumerate(self.modules):
grad = self._get_grad(module)
if not self.exclude_compute_factor:
precon_grad = self._get_preconditioned_grad(module, grad)
else:
precon_grad = grad
updates[module] = precon_grad
self._update_scale_grad(updates)
self.steps += 1
def _get_factor_shape(self):
shape_A = []
shape_G = []
for module in self.modules:
if module.__class__.__name__ == 'Linear':
dim_A = module.in_features
dim_G = module.out_features
elif module.__class__.__name__ == 'Conv2d':
dim_A = module.in_channels * np.prod(module.kernel_size)
dim_G = module.out_channels
if module.bias is not None:
dim_A += 1
shape_A.append(dim_A)
shape_G.append(dim_G)
return shape_A, shape_G
def _generate_module_ranks(self):
if self.module_ranks is not None:
return self.module_ranks
self.rank_iter.reset()
curr_rank = 0
module_ranks = {}
buckets = [0] * hvd.size()
shape_A = [self.m_A[module].shape[1] for module in self.modules]
shape_G = [self.m_G[module].shape[1] for module in self.modules]
# shape_A, shape_G = self._get_factor_shape()
if hvd.rank() == 0:
logger.info('module_shape of A:%s', shape_A)
logger.info('module_shape of G:%s', shape_G)
assigned_rank = 0
for i, module in enumerate(self.modules):
ranks_a = self.rank_iter.next(1)
#ranks_g = self.rank_iter.next(1)
ranks_g = self.rank_iter.next(1) if self.distribute_layer_factors else ranks_a
# debug: three-layer a group
#if i > 0 and i % 14 == 0:
# assigned_rank += 1
# assigned_rank %= hvd.size()
#ranks_a = (assigned_rank, )
#ranks_g = (assigned_rank, )
module_ranks[module] = (ranks_a[0], ranks_g[0])
buckets[ranks_a[0]] += shape_A[i]
buckets[ranks_g[0]] += shape_G[i]
self.module_ranks = module_ranks
if hvd.rank() == 0:
logger.info('module_ranks: %s', module_ranks.values())
logger.info('buckets: %s', buckets)
def _generate_uniform_ranks(self):
if self.module_ranks is not None:
return self.module_ranks
module_ranks = {}
buckets = [0] * hvd.size()
dimensions = []
module_factors = []
for i, m in enumerate(self.modules):
name = self.module_names[i]
a_dimension = self.m_A[m].shape[1]
g_dimension = self.m_G[m].shape[1]
#a_dimension = estimate_time_A[i]
#g_dimension = estimate_time_G[i]
#if hvd.rank() == 0:
# logger.info('A Name: %s, shape: %s', m, self.m_A[m].shape)
# logger.info('G Name: %s, shape: %s', m, self.m_G[m].shape)
dimensions.append(a_dimension)
module_factors.append(name+'-A')
dimensions.append(g_dimension)
module_factors.append(name+'-G')
descending_sorted_idx = np.argsort(dimensions)[::-1]
A_ranks = {}
G_ranks = {}
bi = 0
for i in descending_sorted_idx:
factor = module_factors[i]
dimension = dimensions[i]
m_i = self.module_names.index(factor[0:-2])
m = self.modules[m_i]
bi = np.argmin(buckets)
load = dimension * dimension # square
buckets[bi] += load
if factor[-1] == 'A':
A_ranks[m] = bi
else:
G_ranks[m] = bi
for m in self.modules:
module_ranks[m] = (A_ranks[m], G_ranks[m])
self.module_ranks = module_ranks
if hvd.rank() == 0:
logger.info('module_ranks: %s', module_ranks.values())
logger.info('buckets: %s', buckets)
return module_ranks
def _triu_vectorization(self, tensor):
triu_ind = torch.triu_indices(tensor.shape[0], tensor.shape[1])
triu_vector = tensor[triu_ind[0], triu_ind[1]]
return triu_ind, triu_vector
def _reduce_symmetric_factors(self):
for m in self.modules:
rank_a, rank_g = self.module_ranks[m]
# vectorization
triu_ind_a, triu_vector_a = self._triu_vectorization(self.m_A[m].data)
triu_ind_g, triu_vector_g = self._triu_vectorization(self.m_G[m].data)
# reduce
self.communicator.reduce(triu_vector_a, rank_a)
self.communicator.reduce(triu_vector_g, rank_g)
self.communicator.synchronize()
# recovery
if hvd.rank() == rank_a:
triu_vector_a.div_(hvd.size())
triu_vector_g.div_(hvd.size())
self.m_A[m][triu_ind_a[0], triu_ind_a[1]] = triu_vector_a
self.m_A[m][triu_ind_a[1], triu_ind_a[0]] = triu_vector_a
self.m_G[m][triu_ind_g[0], triu_ind_g[1]] = triu_vector_g
self.m_G[m][triu_ind_g[1], triu_ind_g[0]] = triu_vector_g
def _reduce_factors(self):
#raise NotImplementedError("Reduce op is not implemented by Horovod.")
for m in self.modules:
rank_a, rank_g = self.module_ranks[m]
self.communicator.reduce(self.m_A[m].data, rank_a)
self.communicator.reduce(self.m_G[m].data, rank_g)
self.communicator.synchronize()
for m in self.modules:
rank_a, rank_g = self.module_ranks[m]
if hvd.rank() == rank_a:
self.m_A[m] = self.m_A[m].data / hvd.size()
if hvd.rank() == rank_g:
self.m_G[m] = self.m_G[m].data / hvd.size()
def _allreduce_factors(self):
"""Allreduce the factors for all layers"""
#handles = []
#for m in self.modules:
# handles.append(hvd.allreduce_async_(self.m_A[m].data, op=hvd.Average))
# handles.append(hvd.allreduce_async_(self.m_G[m].data, op=hvd.Average))
#for handle in handles:
# hvd.synchronize(handle)
for m in self.modules:
self.communicator.allReduce(self.m_A[m].data)
self.communicator.allReduce(self.m_G[m].data)
self.communicator.synchronize()
for m in self.modules:
self.m_A[m] = self.m_A[m].data / hvd.size()
self.m_G[m] = self.m_G[m].data / hvd.size()
def _broadcast_inverse_factors(self):
handles = []
for i, m in enumerate(self.modules):
rank_a, rank_g = self.module_ranks[m]
name = self.module_names[i]
h = hvd.broadcast_async_(self.m_inv_A[m], rank_a, name=name+'inverseA')
handles.append(h)
h = hvd.broadcast_async_(self.m_inv_G[m], rank_g, name=name+'inverseG')
handles.append(h)
for handle in handles:
hvd.synchronize(handle)
| [
"torch.triu_indices",
"torch.is_grad_enabled"
] | 1.0 | lzhangbv/kfac_pytorch | 159e7ef9541bb960d79c438622780cdcc71b3210 |
1.0 | import math
import torch
import torch.optim as optim
import numpy as np
#import horovod.torch as hvd
import kfac.backend as backend # hvd -> backend.comm
from kfac.utils import (ComputeA, ComputeG)
from kfac.utils import update_running_avg
from kfac.utils import mat_inv
from kfac.kfac_preconditioner_inv import KFAC as KFAC_INV
import logging
logger = logging.getLogger()
class KFAC(KFAC_INV):
"""
Distributed Preconditioning Distributed K-FAC with explicit factor inversion
Refer to: Scalable K-FAC Training for Deep Neural Networks with Distributed Preconditioning (AAAI 2022?)
Args:
model (nn): Torch model
lr (float): learning rate (default: 0.1)
damping (float): Tikhonov damping parameter (default: 0.001)
fac_update_freq (int): iterations between updates of the KFs (default: 1)
kfac_update_freq (int): iterations between updates of the inverse gradient (default: 1)
kl_clip (float): clipping parameter for gradient scaling
factor_decay (float): running average coefficient for KFs
exclude_vocabulary_size: exclude the pre-softmax linear layer in the Transformer
hook_enabled (bool): enable the hook events to save the intermediate states (a and g)
exclude_parts='': exclude CommunicateInverse,ComputeInverse,CommunicateFactor,ComputeFactor for time breakdowns
"""
def __init__(self,
model,
lr=0.1,
damping=0.001,
fac_update_freq=1,
kfac_update_freq=1,
kl_clip=0.001,
factor_decay=0.95,
exclude_vocabulary_size=None,
hook_enabled=True,
exclude_parts=''):
super(KFAC, self).__init__(model=model, lr=lr, damping=damping,
fac_update_freq=fac_update_freq,
kfac_update_freq=kfac_update_freq,
communicate_inverse_or_not=False, # force to comm_pred
kl_clip=kl_clip,
factor_decay=factor_decay,
exclude_vocabulary_size=exclude_vocabulary_size,
hook_enabled=hook_enabled,
exclude_parts=exclude_parts)
# schedule module ranks in the beginning
self.schedule_module_ranks()
### Compute a and g distributively
def _forward_hook_event(self, module, input):
"""Hook for saving input distributively"""
if self.hook_enabled and torch.is_grad_enabled() and self.steps % self.fac_update_freq == 0:
rank_a, _ = self.module_ranks[module]
if backend.comm.rank() == rank_a:
self.m_a[module] = input[0].data
def _backward_hook_event(self, module, grad_input, grad_output):
"""Hook for saving output gradient distributively"""
if self.hook_enabled and self.steps % self.fac_update_freq == 0:
_, rank_g = self.module_ranks[module]
if backend.comm.rank() == rank_g:
self.m_g[module] = grad_output[0].data
### Compute KFs distributively
def _compute_factors(self):
"""Compute As and Gs distributively"""
for module in self.modules:
rank_a, rank_g = self.module_ranks[module]
if backend.comm.rank() == rank_a:
A = self.computeA(self.m_a[module], module)
if self.steps == 0: # initialize memory as A=I
self.m_A[module] = torch.diag(A.new_ones(A.shape[0]))
update_running_avg(A, self.m_A[module], self.factor_decay)
if backend.comm.rank() == rank_g:
G = self.computeG(self.m_g[module], module, batch_averaged=True)
if self.steps == 0: # initialize memory as G=I
self.m_G[module] = torch.diag(G.new_ones(G.shape[0]))
update_running_avg(G, self.m_G[module], self.factor_decay)
### Communicate KFs
def _communicate_factors(self):
"""No KF communication"""
pass
### Compute Inverse KFs distributively
def _compute_inverse(self):
"""Compute inverse factors distributively"""
for module in self.modules:
rank_a, rank_g = self.module_ranks[module]
if backend.comm.rank() == rank_a:
# if self.steps == 0: # initialize memory as inv_A=0
# A = self.m_A[module]
# self.m_inv_A[module] = A.new_zeros(A.shape)
A = self._add_value_to_diagonal(self.m_A[module], self.damping)
self.m_inv_A[module] = mat_inv(A)
if backend.comm.rank() == rank_g:
# if self.steps == 0: # initialize memory as inv_G=0
# G = self.m_G[module]
# self.m_inv_G[module] = G.new_zeros(G.shape)
G = self._add_value_to_diagonal(self.m_G[module], self.damping)
self.m_inv_G[module] = mat_inv(G)
### Compute Preconditioned Gradients distributively
def _compute_pred(self):
"""Compute the preconditioned gradients distributively"""
assert not self.communicate_inverse_or_not # force to comm_pred
for module in self.modules:
rank_a, rank_g = self.module_ranks[module]
assert rank_a == rank_g
if backend.comm.rank() == rank_a:
grad = self._get_grad(module)
self.m_precon_grad[module] = self.m_inv_G[module] @ grad @ self.m_inv_A[module]
elif self.steps == 0: # initialize memory on other workers for broadcast
grad = self._get_grad(module)
self.m_precon_grad[module] = grad.new_zeros(grad.shape)
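# Note: with rank_a == rank_g, each layer's factors, inverses and preconditioned gradient are all
# computed on a single worker; the zero buffers allocated on the other workers at step 0 are
# presumably the receive buffers for the subsequent broadcast of m_precon_grad (handled in KFAC_INV).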
| [
"torch.is_grad_enabled"
] | 1.0 | lzhangbv/kfac_pytorch | 159e7ef9541bb960d79c438622780cdcc71b3210 |
0.4 | import os
import queue
import re
import time
import torch
import torch.multiprocessing as mp
from autokeras.bayesian import BayesianOptimizer
from autokeras.constant import Constant
from autokeras.nn.model_trainer import ModelTrainer
from autokeras.utils import pickle_to_file, pickle_from_file, verbose_print, get_system
class Searcher:
"""Class to search for neural architectures.
This class generates new architectures, calls the trainer to train them, and updates the Bayesian optimizer.
Attributes:
n_classes: Number of classes in the target classification task.
input_shape: Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape` (tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
verbose: Verbosity mode.
history: A list that stores the performance of each model. Each element in it is a dictionary of 'model_id',
'loss', and 'metric_value'.
path: A string. The path to the directory for saving the searcher.
metric: An instance of the Metric subclasses.
loss: A function taking two parameters, the predictions and the ground truth.
generators: A list of generators used to initialize the search.
model_count: An integer. the total number of neural networks in the current searcher.
descriptors: A dictionary of all the neural network architectures searched.
trainer_args: A dictionary. The params for the constructor of ModelTrainer.
default_model_len: An integer. Number of convolutional layers in the initial architecture.
default_model_width: An integer. The number of filters in each layer in the initial architecture.
training_queue: A list of the generated architectures to be trained.
x_queue: A list of trained architectures not updated to the gpr.
y_queue: A list of trained architecture performances not updated to the gpr.
t_min: A float. The minimum temperature during simulated annealing.
bo: An instance of BayesianOptimizer.
"""
def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose,
trainer_args=None,
default_model_len=None,
default_model_width=None,
t_min=None):
"""Initialize the Searcher.
Args:
n_output_node: An integer, the number of classes.
input_shape: A tuple. e.g. (28, 28, 1).
path: A string. The path to the directory to save the searcher.
metric: An instance of the Metric subclasses.
loss: A function taking two parameters, the predictions and the ground truth.
generators: A list of generators used to initialize the search.
verbose: A boolean. Whether to output the intermediate information to stdout.
trainer_args: A dictionary. The params for the constructor of ModelTrainer.
default_model_len: An integer. Number of convolutional layers in the initial architecture.
default_model_width: An integer. The number of filters in each layer in the initial architecture.
t_min: A float. The minimum temperature during simulated annealing.
"""
if trainer_args is None:
trainer_args = {}
self.n_classes = n_output_node
self.input_shape = input_shape
self.verbose = verbose
self.history = []
self.path = path
self.metric = metric
self.loss = loss
self.generators = generators
self.model_count = 0
self.descriptors = []
self.trainer_args = trainer_args
self.default_model_len = default_model_len if default_model_len is not None else Constant.MODEL_LEN
self.default_model_width = default_model_width if default_model_width is not None else Constant.MODEL_WIDTH
if 'max_iter_num' not in self.trainer_args:
self.trainer_args['max_iter_num'] = Constant.SEARCH_MAX_ITER
self.training_queue = []
self.x_queue = []
self.y_queue = []
if t_min is None:
t_min = Constant.T_MIN
self.bo = BayesianOptimizer(self, t_min, metric)
def load_model_by_id(self, model_id):
return pickle_from_file(os.path.join(self.path, str(model_id) + '.graph'))
def load_best_model(self):
return self.load_model_by_id(self.get_best_model_id())
def get_metric_value_by_id(self, model_id):
for item in self.history:
if item['model_id'] == model_id:
return item['metric_value']
return None
def get_best_model_id(self):
if self.metric.higher_better():
return max(self.history, key=lambda x: x['metric_value'])['model_id']
return min(self.history, key=lambda x: x['metric_value'])['model_id']
def replace_model(self, graph, model_id):
pickle_to_file(graph, os.path.join(self.path, str(model_id) + '.graph'))
def add_model(self, metric_value, loss, graph, model_id):
"""Append the information of evaluated architecture to history."""
if self.verbose:
print('\nSaving model.')
graph.clear_operation_history()
pickle_to_file(graph, os.path.join(self.path, str(model_id) + '.graph'))
ret = {'model_id': model_id, 'loss': loss, 'metric_value': metric_value}
self.history.append(ret)
# Update best_model text file
if model_id == self.get_best_model_id():
file = open(os.path.join(self.path, 'best_model.txt'), 'w')
file.write('best model: ' + str(model_id))
file.close()
if self.verbose:
idx = ['model_id', 'loss', 'metric_value']
header = ['Model ID', 'Loss', 'Metric Value']
line = '|'.join(x.center(24) for x in header)
print('+' + '-' * len(line) + '+')
print('|' + line + '|')
if self.history:
r = self.history[-1]
print('+' + '-' * len(line) + '+')
line = '|'.join(str(r[x]).center(24) for x in idx)
print('|' + line + '|')
print('+' + '-' * len(line) + '+')
return ret
def init_search(self):
"""Call the generators to generate the initial architectures for the search."""
if self.verbose:
print('\nInitializing search.')
for generator in self.generators:
graph = generator(self.n_classes, self.input_shape). \
generate(self.default_model_len, self.default_model_width)
model_id = self.model_count
self.model_count += 1
self.training_queue.append((graph, -1, model_id))
self.descriptors.append(graph.extract_descriptor())
if self.verbose:
print('Initialization finished.')
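# Rough driver sketch (the generator class, data loaders and time budget below are placeholders,
# not part of this module):
# searcher = Searcher(n_output_node=10, input_shape=(28, 28, 1), path='/tmp/ak', metric=metric,
#                     loss=loss, generators=[SomeGraphGenerator], verbose=True)
# while remaining_seconds > 0:
#     searcher.search(train_loader, test_loader, timeout=remaining_seconds)
# best_graph = searcher.load_best_model()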
def search(self, train_data, test_data, timeout=60 * 60 * 24):
"""Run the search loop of training, generating and updating once.
The function runs the training and the architecture generation in parallel,
then updates the controller.
The training step simply pops a graph from the training_queue and trains it.
The generation step calls the self.generate function.
The update step calls the self.update function.
Args:
train_data: An instance of DataLoader.
test_data: An instance of DataLoader.
timeout: An integer, time limit in seconds.
"""
start_time = time.time()
torch.cuda.empty_cache()
if not self.history:
self.init_search()
# Start the new process for training.
graph, other_info, model_id = self.training_queue.pop(0)
if self.verbose:
print('\n')
print('+' + '-' * 46 + '+')
print('|' + 'Training model {}'.format(model_id).center(46) + '|')
print('+' + '-' * 46 + '+')
# Temporary solution to support GOOGLE Colab
if get_system() == Constant.SYS_GOOGLE_COLAB:
ctx = mp.get_context('fork')
else:
ctx = mp.get_context('spawn')
q = ctx.Queue()
p = ctx.Process(target=train, args=(q, graph, train_data, test_data, self.trainer_args,
self.metric, self.loss, self.verbose, self.path))
try:
p.start()
# Do the search in current thread.
searched = False
generated_graph = None
generated_other_info = None
if not self.training_queue:
searched = True
remaining_time = timeout - (time.time() - start_time)
generated_other_info, generated_graph = self.generate(remaining_time, q)
new_model_id = self.model_count
self.model_count += 1
self.training_queue.append((generated_graph, generated_other_info, new_model_id))
self.descriptors.append(generated_graph.extract_descriptor())
remaining_time = timeout - (time.time() - start_time)
if remaining_time <= 0:
raise TimeoutError
metric_value, loss, graph = q.get(timeout=remaining_time)
if self.verbose and searched:
verbose_print(generated_other_info, generated_graph, new_model_id)
if metric_value is not None:
self.add_model(metric_value, loss, graph, model_id)
self.update(other_info, graph, metric_value, model_id)
except (TimeoutError, queue.Empty) as e:
raise TimeoutError from e
finally:
# terminate and join the subprocess to prevent any resource leak
p.terminate()
p.join()
def update(self, other_info, graph, metric_value, model_id):
""" Update the controller with evaluation result of a neural architecture.
Args:
other_info: Anything. In our case it is the father ID in the search tree.
graph: An instance of Graph. The trained neural architecture.
metric_value: The final evaluated metric value.
model_id: An integer.
"""
father_id = other_info
self.bo.fit([graph.extract_descriptor()], [metric_value])
self.bo.add_child(father_id, model_id)
def generate(self, remaining_time, multiprocessing_queue):
"""Generate the next neural architecture.
Args:
remaining_time: The remaining time in seconds.
multiprocessing_queue: the Queue for multiprocessing return value.
Returns:
other_info: Anything to be saved in the training queue together with the architecture.
generated_graph: An instance of Graph.
"""
generated_graph, new_father_id = self.bo.generate(self.descriptors,
remaining_time, multiprocessing_queue)
if new_father_id is None:
new_father_id = 0
generated_graph = self.generators[0](self.n_classes, self.input_shape). \
generate(self.default_model_len, self.default_model_width)
return new_father_id, generated_graph
def train(q, graph, train_data, test_data, trainer_args, metric, loss, verbose, path):
"""Train the neural architecture."""
try:
model = graph.produce_model()
loss, metric_value = ModelTrainer(model=model,
path=path,
train_data=train_data,
test_data=test_data,
metric=metric,
loss_function=loss,
verbose=verbose).train_model(**trainer_args)
model.set_weight_to_graph()
if q:
q.put((metric_value, loss, model.graph))
return metric_value, loss, model.graph
except RuntimeError as e:
if not re.search('out of memory', str(e)):
raise e
if verbose:
print('\nCurrent model size is too big. Discontinuing training this model to search for other models.')
Constant.MAX_MODEL_SIZE = graph.size() - 1
if q:
q.put((None, None, None))
return None, None, None
| [
"torch.multiprocessing.get_context",
"torch.cuda.empty_cache"
] | 0.4.1 | MustafaKadioglu/autokeras | e75f3194ac4ed6741bc64583fda483dc2f6dfe09 |
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import math
import os
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import printable_text, whitespace_tokenize, BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from pytorch_pretrained_bert.optimization import BertAdam
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class SquadExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (printable_text(self.qas_id))
s += ", question_text: %s" % (
printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
if is_training:
if len(qa["answers"]) != 1:
raise ValueError(
"For training, each question should have exactly 1 answer.")
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position)
examples.append(example)
return examples
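# Expected SQuAD v1.1 layout consumed above (field names only; values are illustrative):
# {"data": [{"paragraphs": [{"context": "...",
#                            "qas": [{"id": "...", "question": "...",
#                                     "answers": [{"text": "...", "answer_start": 0}]}]}]}]}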
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
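# Worked example of the window loop above (numbers are illustrative): with 10 document tokens,
# max_tokens_for_doc=6 and doc_stride=4, it yields DocSpan(start=0, length=6) and
# DocSpan(start=4, length=6), then stops because 4 + 6 reaches the document length.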
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
if (example.start_position < doc_start or
example.end_position < doc_start or
example.start_position > doc_end or example.end_position > doc_end):
continue
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(
[printable_text(x) for x in tokens]))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (printable_text(answer_text)))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position))
unique_id += 1
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
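# Added illustrative example (assumes "1895" survives WordPiece tokenization as
# a single piece): with doc_tokens
# ["the", "leader", "was", "john", "smith", "(", "1895", "-", "1943", ")", "."],
# an input span of (5, 10) and orig_answer_text "1895", the search above narrows
# the span to the single matching token and returns (6, 6).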
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
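# Added worked example of the scoring rule below (illustrative only): both spans
# have length 5, so span B scores min(4, 0) + 0.01 * 5 = 0.05 for 'bought' while
# span C scores min(1, 3) + 0.01 * 5 = 1.05, and span C is chosen.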
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, verbose_logging):
"""Write final predictions to the json file."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
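# Added illustrative example (not part of the original code): for the inputs
# from the comment block above, get_final_text("steve smith", "Steve Smith's",
# do_lower_case=True) returns "Steve Smith", because the stripped-space
# character alignment maps the tokenized span back onto the original text.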
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
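# Added illustrative example: _get_best_indexes([0.1, 0.5, 0.3], n_best_size=2)
# returns [1, 2], the indices of the two highest logits in descending order.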
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
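# Added illustrative example: _compute_softmax([1.0, 2.0, 3.0]) returns roughly
# [0.0900, 0.2447, 0.6652]; subtracting max_score beforehand only improves
# numerical stability and does not change the result.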
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the parameters optimized on CPU/RAM back to the model on GPU
"""
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
param_model.data.copy_(param_opti.data)
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the gradient of the GPU parameters to the CPU/RAM copy of the model
"""
is_nan = False
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
if param_model.grad is not None:
if test_nan and torch.isnan(param_model.grad).sum() > 0:
is_nan = True
if param_opti.grad is None:
param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))
param_opti.grad.data.copy_(param_model.grad.data)
else:
param_opti.grad = None
return is_nan
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--trained_model", default=None, type=str, required=True,
help="Specify path to trained model required for prediction.")
## Other parameters
parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str,
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.")
parser.add_argument("--do_predict", default=False, action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% "
"of training.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", default=False, action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--optimize_on_cpu',
default=False,
action='store_true',
help="Whether to perform optimization and keep the optimizer averages on CPU")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=128,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--do_lower_case', action="store_true", default=True, help="Lowercase the input")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
if args.fp16:
logger.info("16-bits training currently not supported in distributed training")
args.fp16 = False # (see https://github.com/pytorch/pytorch/pull/13496)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits trainiing: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.do_train:
if not args.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if args.do_predict:
if not args.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory () already exists and is not empty.")
os.makedirs(args.output_dir, exist_ok=True)
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
train_examples = None
num_train_steps = None
if args.do_train:
train_examples = read_squad_examples(
input_file=args.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model = BertForQuestionAnswering.from_pretrained(args.bert_model)
#the_model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(args.trained_model))
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.fp16:
param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_()) \
for n, param in model.named_parameters()]
elif args.optimize_on_cpu:
param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \
for n, param in model.named_parameters()]
else:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_steps)
global_step = 0
if True:
eval_examples = read_squad_examples(
input_file=args.predict_file, is_training=False)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=False)
logger.info("***** Running predictions *****")
logger.info(" Num orig examples = %d", len(eval_examples))
logger.info(" Num split examples = %d", len(eval_features))
logger.info(" Batch size = %d", args.predict_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
all_results = []
logger.info("Start evaluating")
for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
if len(all_results) % 1000 == 0:
logger.info("Processing example: %d" % (len(all_results)))
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(args.output_dir, "predictions.json")
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
write_predictions(eval_examples, eval_features, all_results,
args.n_best_size, args.max_answer_length,
args.do_lower_case, output_prediction_file,
output_nbest_file, args.verbose_logging)
if __name__ == "__main__":
main()
| [
"torch.device",
"torch.cuda.manual_seed_all",
"torch.isnan",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.nn.DataParallel"
] | 0.4.1 | Theerit/bert | 2251eac7031f5ca4e7fdcec88c3c96a4a1595cff |
1.9 | import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import sys
sys.path.append('/home/goda/Undergraduate/capstone_design_base/src')
from src.dataset.dataset import MVP
from src.models.pointnet import PointNetCls, feature_transform_regularizer
from src.utils.log import get_log_file, logging_for_train
from src.utils.weights import get_trained_model_directory, save_trained_model
from tqdm import tqdm
import datetime
from pathlib import Path
# Todo 1. scheduler check
# Todo 2. transformation network check
# Todo 3. Saving trained network
#####
THRESHOLD = 10
NUM_POINTS = 2048
BATCH_SIZE = 32
NUM_CLASSES = 16
NUM_EPOCH = 200
FEATURE_TRANSFORM = True
LEARNING_RATE = 0.001
BETAS = (0.9, 0.999)
STEP_SIZE = 20
GAMMA = 0.5
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
NUM_WORKERS = 20
print(DEVICE)
#####
def train(model, train_loader, lr_schedule):
total_loss = 0.0
total_correct = 0.0
total_count = 0
model.train()
for batch_index, (point_clouds, labels, ground_truths) in enumerate(train_loader, start=1):
# sampling
# if NUM_POINTS != 2048:
# indices = torch.randperm(point_clouds.size()[1])
# indices = indices[:NUM_POINTS]
# point_clouds = point_clouds[:, indices, :]
point_clouds = point_clouds.transpose(2, 1) # (batch_size, num_points, 3) -> (batch_size, 3, num_points)
point_clouds, labels = point_clouds.to(DEVICE), labels.to(DEVICE)
optimizer.zero_grad()
scores, trans, trans_feat = model(point_clouds)
loss = F.nll_loss(scores, labels)
if FEATURE_TRANSFORM: # for regularization
loss += feature_transform_regularizer(trans_feat) * 0.001
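# Added note (assumption about the imported helper): feature_transform_regularizer
# penalizes the predicted feature transform for deviating from an orthogonal
# matrix, as proposed in the PointNet paper; 0.001 is the weight used there.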
total_loss += loss.item()
loss.backward()
optimizer.step()
_, predictions = torch.max(scores, 1)
total_correct += (predictions == labels).sum().item()
total_count += labels.size(0)
lr_schedule.step()
return total_loss, batch_index, total_correct, total_count
def evaluate(model, test_loader):
total_loss = 0.0
total_correct = 0.0
total_count = 0
category_correct = [0] * 16
category_count = [0] * 16
model.eval()
with torch.no_grad():
for batch_index, (point_clouds, labels, ground_truths) in enumerate(test_loader, start=1):
# sampling
# if NUM_POINTS != 2048:
# indices = torch.randperm(point_clouds.size()[1])
# indices = indices[:NUM_POINTS]
# point_clouds = point_clouds[:, indices, :]
point_clouds = point_clouds.transpose(2, 1) # (batch_size, num_points, 3) -> (batch_size, 3, num_points)
point_clouds, labels = point_clouds.to(DEVICE), labels.to(DEVICE)
scores, trans, trans_feat = model(point_clouds)
loss = F.nll_loss(scores, labels)
# if FEATURE_TRANSFORM: # for regularization
# loss += feature_transform_regularizer(trans_feat) * 0.001
total_loss += loss
_, predictions = torch.max(scores, 1)
total_correct += (predictions == labels).sum().item()
total_count += labels.size(0)
corrects = (predictions == labels)
for i in range(len(corrects)):
label = labels[i]
category_correct[label] += corrects[i].item()
category_count[label] += 1
return total_loss, batch_index, total_correct, total_count, category_correct, category_count
if __name__ == "__main__":
train_dataset = MVP(
dataset_type="train",
pcd_type="incomplete")
validation_dataset = MVP(
dataset_type="validation",
pcd_type="incomplete")
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=NUM_WORKERS
)
validation_loader = torch.utils.data.DataLoader(
dataset=validation_dataset,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS
)
classifier = PointNetCls(k=NUM_CLASSES, feature_transform=FEATURE_TRANSFORM)
classifier.to(device=DEVICE)
optimizer = optim.Adam(classifier.parameters(), lr=LEARNING_RATE, betas=BETAS)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
log_file = get_log_file(experiment_type="train", dataset_type="mvp", train_shape="incomplete")
weights_directory = get_trained_model_directory(dataset_type="mvp", train_shape="incomplete")
min_loss = float("inf")
count = 0
for epoch in tqdm(range(NUM_EPOCH)):
train_result = train(model=classifier, train_loader=train_loader, lr_schedule=scheduler)
validation_result = evaluate(model=classifier, test_loader=validation_loader)
if validation_result[0] < min_loss:
save_trained_model(classifier, epoch, weights_directory)
min_loss = validation_result[0]
count = 0
else:
count += 1
logging_for_train(log_file, epoch, train_result, validation_result)
if count >= THRESHOLD:
break
print(f"The Experiments is ended at {datetime.datetime.now()}.")
| [
"torch.device",
"torch.optim.lr_scheduler.StepLR",
"torch.max",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.functional.nll_loss"
] | 1.9.1 | GoDa-Choe/capstone_design | cb3ce264c7720594a64b7e1717247ad12c522116 |
1.0 | import tempfile
import torch
from transformers import MODEL_WITH_HEADS_MAPPING, AutoModelForSequenceClassification, AutoModelWithHeads
from transformers.adapters.composition import BatchSplit, Stack
from transformers.testing_utils import require_torch, torch_device
from .test_adapter_common import create_twin_models
@require_torch
class PredictionHeadModelTestMixin:
batch_size = 1
seq_length = 128
def run_prediction_head_test(
self, model, compare_model, head_name, input_shape=None, output_shape=(1, 2), label_dict=None
):
# first, check if the head is actually correctly registered as part of the pt module
self.assertTrue(f"heads.{head_name}" in dict(model.named_modules()))
# save & reload
with tempfile.TemporaryDirectory() as temp_dir:
model.save_head(temp_dir, head_name)
compare_model.load_head(temp_dir)
# check if adapter was correctly loaded
self.assertTrue(head_name in compare_model.heads)
model.to(torch_device)
compare_model.to(torch_device)
# make a forward pass
model.active_head = head_name
input_shape = input_shape or (self.batch_size, self.seq_length)
in_data = self.get_input_samples(input_shape, config=model.config)
if label_dict:
for k, v in label_dict.items():
in_data[k] = v
output1 = model(**in_data)
# For Seq2SeqLMOutput, the logits are at index 0
# ToDo figure out why
idx = "logits" if hasattr(output1, "logits") else 1
self.assertEqual(output_shape, tuple(output1[idx].size()))
# check equal output
compare_model.active_head = head_name
output2 = compare_model(**in_data)
self.assertEqual(len(output1), len(output2))
self.assertTrue(torch.equal(output1[idx], output2[idx]))
def test_classification_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_classification_head"):
self.skipTest("No classification head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_classification_head("dummy")
label_dict = {}
label_dict["labels"] = torch.zeros(self.batch_size, dtype=torch.long, device=torch_device)
self.run_prediction_head_test(model1, model2, "dummy", label_dict=label_dict)
def test_multiple_choice_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_multiple_choice_head"):
self.skipTest("No multiple choice head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_multiple_choice_head("dummy")
label_dict = {}
label_dict["labels"] = torch.ones(self.batch_size, dtype=torch.long, device=torch_device)
self.run_prediction_head_test(
model1, model2, "dummy", input_shape=(self.batch_size, 2, self.seq_length), label_dict=label_dict
)
def test_tagging_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_tagging_head"):
self.skipTest("No tagging head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_tagging_head("dummy")
label_dict = {}
label_dict["labels"] = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
self.run_prediction_head_test(
model1, model2, "dummy", output_shape=(1, self.seq_length, 2), label_dict=label_dict
)
def test_qa_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_qa_head"):
self.skipTest("No QA head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_qa_head("dummy")
label_dict = {}
label_dict["start_positions"] = torch.zeros(self.batch_size, dtype=torch.long, device=torch_device)
label_dict["end_positions"] = torch.zeros(self.batch_size, dtype=torch.long, device=torch_device)
self.run_prediction_head_test(
model1, model2, "dummy", output_shape=(1, self.seq_length), label_dict=label_dict
)
def test_causal_or_seq2seq_lm_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_causal_lm_head"):
if hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_seq2seq_lm_head"):
seq2seq_head = True
else:
self.skipTest("No causal or seq2seq language model head")
else:
seq2seq_head = False
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
if seq2seq_head:
model1.add_seq2seq_lm_head("dummy")
else:
model1.add_causal_lm_head("dummy")
label_dict = {}
label_dict["labels"] = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
self.run_prediction_head_test(
model1, model2, "dummy", output_shape=(1, self.seq_length, model1.config.vocab_size), label_dict=label_dict
)
def test_masked_lm_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_masked_lm_head"):
self.skipTest("No causal or seq2seq language model head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_masked_lm_head("dummy")
label_dict = {}
label_dict["labels"] = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
self.run_prediction_head_test(
model1, model2, "dummy", output_shape=(1, self.seq_length, model1.config.vocab_size), label_dict=label_dict
)
def test_dependency_parsing_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_dependency_parsing_head"):
self.skipTest("No dependency parsing head")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
model1.add_dependency_parsing_head("dummy")
label_dict = {}
label_dict["labels_arcs"] = torch.zeros(
(self.batch_size, self.seq_length), dtype=torch.long, device=torch_device
)
label_dict["labels_rels"] = torch.zeros(
(self.batch_size, self.seq_length), dtype=torch.long, device=torch_device
)
label_dict["word_starts"] = torch.zeros(
(self.batch_size, self.seq_length), dtype=torch.long, device=torch_device
)
self.run_prediction_head_test(
model1, model2, "dummy", output_shape=(1, self.seq_length, self.seq_length + 1, 2), label_dict=label_dict
)
def test_delete_head(self):
model = AutoModelWithHeads.from_config(self.config())
model.eval()
name = "test_head"
self.add_head(model, name)
self.assertTrue(name in model.heads)
self.assertTrue(name in model.config.prediction_heads)
self.assertEqual(name, model.active_head)
model.delete_head(name)
self.assertFalse(name in model.heads)
self.assertFalse(name in model.config.prediction_heads)
self.assertNotEqual(name, model.active_head)
def test_adapter_with_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_classification_head"):
self.skipTest("No classification head available")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
name = "dummy"
model1.add_adapter(name)
model1.add_classification_head(name, num_labels=3)
model1.set_active_adapters(name)
with tempfile.TemporaryDirectory() as temp_dir:
model1.save_adapter(temp_dir, name)
model2.load_adapter(temp_dir)
model2.set_active_adapters(name)
# check equal output
in_data = self.get_input_samples((1, 128), config=model1.config)
model1.to(torch_device)
model2.to(torch_device)
output1 = model1(**in_data)
output2 = model2(**in_data)
self.assertEqual(len(output1), len(output2))
self.assertTrue(torch.equal(output1[0], output2[0]))
self.assertEqual(3, output1[0].size()[1])
def test_adapter_with_head_load_as(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_classification_head"):
self.skipTest("No classification head available")
model1, model2 = create_twin_models(AutoModelWithHeads, self.config)
name = "dummy"
model1.add_adapter(name)
model1.add_classification_head(name, num_labels=3)
model1.set_active_adapters(name)
with tempfile.TemporaryDirectory() as temp_dir:
model1.save_adapter(temp_dir, name)
# reload using a different name
model2.load_adapter(temp_dir, load_as="new_name")
model2.set_active_adapters("new_name")
# check equal output
in_data = self.get_input_samples((1, 128), config=model1.config)
model1.to(torch_device)
model2.to(torch_device)
output1 = model1(**in_data)
output2 = model2(**in_data)
self.assertEqual(len(output1), len(output2))
self.assertTrue(torch.equal(output1[0], output2[0]))
self.assertEqual(3, output1[0].size()[1])
def test_load_full_model(self):
model = AutoModelWithHeads.from_config(self.config())
model.add_classification_head("dummy", layers=1)
true_config = model.get_prediction_heads_config()
with tempfile.TemporaryDirectory() as temp_dir:
# save
model.save_pretrained(temp_dir)
# reload
model = AutoModelWithHeads.from_pretrained(temp_dir)
self.assertIn("dummy", model.heads)
self.assertDictEqual(true_config, model.get_prediction_heads_config())
def test_batch_split_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_classification_head"):
self.skipTest("No classification head available")
model = AutoModelWithHeads.from_config(self.config())
model.add_classification_head("a")
model.add_classification_head("b")
model.active_head = BatchSplit("a", "b", batch_sizes=[1, 2])
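# Added note: BatchSplit routes the first sample through head "a" and the
# remaining two through head "b", which is why the two outputs asserted below
# have batch sizes 1 and 2.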
in_data = self.get_input_samples((3, 128), config=model.config)
model.to(torch_device)
out = model(**in_data)
self.assertEqual(2, len(out))
self.assertEqual((1, 2), out[0][0].shape)
self.assertEqual((2, 2), out[1][0].shape)
def test_batch_split_adapter_head(self):
model = AutoModelWithHeads.from_config(self.config())
self.add_head(model, "a")
self.add_head(model, "b")
model.add_adapter("a")
model.add_adapter("b")
model.add_adapter("c")
model.set_active_adapters(BatchSplit(Stack("c", "a"), "b", batch_sizes=[2, 1]))
in_data = self.get_input_samples((3, 128), config=model.config)
model.to(torch_device)
out = model(**in_data)
self.assertEqual(2, len(out))
self.assertTrue(isinstance(model.active_head, BatchSplit))
def test_reload_static_to_flex_head(self):
if not hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_classification_head"):
self.skipTest("No classification head available")
static_head_model = AutoModelForSequenceClassification.from_config(self.config())
flex_head_model = AutoModelWithHeads.from_pretrained(
None, config=self.config(), state_dict=static_head_model.state_dict()
)
static_head_model.eval()
flex_head_model.eval()
static_head_model.add_adapter("test")
with tempfile.TemporaryDirectory() as temp_dir:
static_head_model.save_adapter(temp_dir, "test")
loading_info = {}
flex_head_model.load_adapter(temp_dir, loading_info=loading_info)
# Load the adapter a second time to make sure our conversion script doesn't break anything
flex_head_model.load_adapter(temp_dir, loading_info=loading_info)
self.assertEqual(0, len(loading_info["missing_keys"]))
self.assertEqual(0, len(loading_info["unexpected_keys"]))
# adapter and head were loaded
self.assertIn("test", flex_head_model.config.adapters)
self.assertIn("test", flex_head_model.heads)
# check equal output
in_data = self.get_input_samples((1, 128), config=flex_head_model.config)
static_head_model.to(torch_device)
flex_head_model.to(torch_device)
output1 = static_head_model(**in_data, adapter_names=["test"])
output2 = flex_head_model(**in_data, adapter_names=["test"], head="test")
self.assertTrue(torch.all(torch.isclose(output1.logits, output2.logits)))
def test_invertible_adapter_with_head(self):
if hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_masked_lm_head"):
lm_head = "masked_lm"
elif hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_causal_lm_head"):
lm_head = "casual_lm"
elif hasattr(MODEL_WITH_HEADS_MAPPING[self.config_class], "add_seq2seq_lm_head"):
lm_head = "seq2seq_lm"
else:
self.skipTest("No masked or causel language model head")
model = AutoModelWithHeads.from_config(self.config())
model.add_adapter("test", config="pfeiffer+inv")
if lm_head == "causal_lm":
model.add_causal_lm_head("test")
elif lm_head == "masked_lm":
model.add_masked_lm_head("test")
elif lm_head == "seq2seq_lm":
model.add_seq2seq_lm_head("test")
else:
raise RuntimeError("{} is not a valid lm head".format(lm_head))
model.set_active_adapters("test")
# Set a hook before the invertible adapter to make sure it's actually called twice:
# Once after the embedding layer and once in the prediction head.
calls = 0
def forward_pre_hook(module, input):
nonlocal calls
calls += 1
inv_adapter = model.base_model.get_invertible_adapter()
self.assertIsNotNone(inv_adapter)
inv_adapter.register_forward_pre_hook(forward_pre_hook)
in_data = self.get_input_samples((self.batch_size, self.seq_length), config=model.config)
model.to(torch_device)
out = model(**in_data)
self.assertEqual((self.batch_size, self.seq_length, model.config.vocab_size), out[0].shape)
self.assertEqual(2, calls)
| [
"torch.zeros",
"torch.isclose",
"torch.equal",
"torch.ones"
] | 1.0 | HimashiRathnayake/adapter-transformers | d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4 |
1.7 | # Task inference based meta-RL algorithm using Gaussian mixture models and gated recurrent units (TIGR)
import os
import numpy as np
import click
import json
import torch
import copy
from rlkit.envs import ENVS
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.networks import Mlp, FlattenMlp
from rlkit.launchers.launcher_util import setup_logger
import rlkit.torch.pytorch_util as ptu
from configs.default import default_config
from tigr.task_inference.prediction_networks import DecoderMDP
from tigr.sac import PolicyTrainer
from tigr.stacked_replay_buffer import StackedReplayBuffer
from tigr.rollout_worker import RolloutCoordinator
from tigr.agent_module import Agent, ScriptedPolicyAgent
from tigr.training_algorithm import TrainingAlgorithm
from tigr.task_inference.true_gmm_inference import DecoupledEncoder
from tigr.trainer.true_gmm_trainer import AugmentedTrainer
from torch.utils.tensorboard import SummaryWriter
import vis_utils.tb_logging as TB
def experiment(variant):
# optional GPU mode
ptu.set_gpu_mode(variant['util_params']['use_gpu'], variant['util_params']['gpu_id'])
torch.set_num_threads(1)
# Important: Gru and Conv only work with trajectory encoding
if variant['algo_params']['encoder_type'] in ['gru'] and variant['algo_params']['encoding_mode'] != 'trajectory':
print(f'\nInformation: Setting encoding mode to trajectory since encoder type '
f'"{variant["algo_params"]["encoder_type"]}" doesn\'t work with '
f'"{variant["algo_params"]["encoding_mode"]}"!\n')
variant['algo_params']['encoding_mode'] = 'trajectory'
elif variant['algo_params']['encoder_type'] in ['transformer', 'conv'] and variant['algo_params']['encoding_mode'] != 'transitionSharedY':
print(f'\nInformation: Setting encoding mode to transitionSharedY since encoder type '
f'"{variant["algo_params"]["encoder_type"]}" doesn\'t work with '
f'"{variant["algo_params"]["encoding_mode"]}"!\n')
variant['algo_params']['encoding_mode'] = 'transitionSharedY'
# Seeding
if(variant['algo_params']['use_fixed_seeding']):
torch.manual_seed(variant['algo_params']['seed'])
np.random.seed(variant['algo_params']['seed'])
# create logging directory
experiment_log_dir = setup_logger(variant['env_name'], variant=variant, exp_id=variant['util_params']['exp_name'],
base_log_dir=variant['util_params']['base_log_dir'], snapshot_mode='gap',
snapshot_gap=variant['algo_params']['snapshot_gap'])
# Create tensorboard writer and reset values
TB.TENSORBOARD_LOGGER = SummaryWriter(log_dir=os.path.join(experiment_log_dir, 'tensorboard'))
TB.LOG_INTERVAL = variant['util_params']['tb_log_interval']
TB.TRAINING_LOG_STEP = 0
TB.AUGMENTATION_LOG_STEP = 0
TB.TI_LOG_STEP = 0
TB.DEBUG_LOG_STEP = 0
# create multi-task environment and sample tasks
env = ENVS[variant['env_name']](**variant['env_params'])
if variant['env_params']['use_normalized_env']:
env = NormalizedBoxEnv(env)
obs_dim = int(np.prod(env.observation_space.shape))
action_dim = int(np.prod(env.action_space.shape))
reward_dim = 1
tasks = list(range(len(env.tasks)))
train_tasks = list(range(len(env.train_tasks)))
test_tasks = tasks[-variant['env_params']['n_eval_tasks']:]
# Dump task dict as json
name2number = None
if hasattr(env, 'name2number'):
name2number = env.name2number
with open(os.path.join(experiment_log_dir, 'task_dict.json'), 'w') as f:
json.dump(name2number, f)
# instantiate networks
net_complex_enc_dec = variant['reconstruction_params']['net_complex_enc_dec']
latent_dim = variant['algo_params']['latent_size']
time_steps = variant['algo_params']['time_steps']
num_classes = variant['reconstruction_params']['num_classes']
# encoder used: single transitions or trajectories
if variant['algo_params']['encoding_mode'] == 'transitionSharedY':
encoder_input_dim = obs_dim + action_dim + reward_dim + obs_dim
shared_dim = int(encoder_input_dim * net_complex_enc_dec) # dimension of shared encoder output
elif variant['algo_params']['encoding_mode'] == 'trajectory':
encoder_input_dim = time_steps * (obs_dim + action_dim + reward_dim + obs_dim)
shared_dim = int(encoder_input_dim / time_steps * net_complex_enc_dec) # dimension of shared encoder output
else:
raise NotImplementedError
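# Added note: in 'transitionSharedY' mode the encoder consumes a single
# (state, action, reward, next_state) transition per step, whereas in
# 'trajectory' mode the inputs of all `time_steps` transitions are concatenated
# into one vector, hence the factor of time_steps in encoder_input_dim above.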
encoder = DecoupledEncoder(
shared_dim,
encoder_input_dim,
latent_dim,
num_classes,
time_steps,
encoding_mode=variant['algo_params']['encoding_mode'],
timestep_combination=variant['algo_params']['timestep_combination'],
encoder_type=variant['algo_params']['encoder_type']
)
decoder = DecoderMDP(
action_dim,
obs_dim,
reward_dim,
latent_dim,
net_complex_enc_dec,
variant['env_params']['state_reconstruction_clip'],
)
M = variant['algo_params']['sac_layer_size']
qf1 = FlattenMlp(
input_size=(obs_dim + latent_dim) + action_dim,
output_size=1,
hidden_sizes=[M, M, M],
)
qf2 = FlattenMlp(
input_size=(obs_dim + latent_dim) + action_dim,
output_size=1,
hidden_sizes=[M, M, M],
)
target_qf1 = FlattenMlp(
input_size=(obs_dim + latent_dim) + action_dim,
output_size=1,
hidden_sizes=[M, M, M],
)
target_qf2 = FlattenMlp(
input_size=(obs_dim + latent_dim) + action_dim,
output_size=1,
hidden_sizes=[M, M, M],
)
policy = TanhGaussianPolicy(
obs_dim=(obs_dim + latent_dim),
action_dim=action_dim,
latent_dim=latent_dim,
hidden_sizes=[M, M, M],
)
alpha_net = Mlp(
hidden_sizes=[latent_dim * 10],
input_size=latent_dim,
output_size=1
)
networks = {'encoder': encoder,
'decoder': decoder,
'qf1': qf1,
'qf2': qf2,
'target_qf1': target_qf1,
'target_qf2': target_qf2,
'policy': policy,
'alpha_net': alpha_net}
replay_buffer = StackedReplayBuffer(
variant['algo_params']['max_replay_buffer_size'],
time_steps,
obs_dim,
action_dim,
latent_dim,
variant['algo_params']['permute_samples'],
variant['algo_params']['encoding_mode'],
variant['algo_params']['sampling_mode']
)
replay_buffer_augmented = StackedReplayBuffer(
variant['algo_params']['max_replay_buffer_size'],
time_steps,
obs_dim,
action_dim,
latent_dim,
variant['algo_params']['permute_samples'],
variant['algo_params']['encoding_mode'],
variant['algo_params']['sampling_mode']
)
# optionally load pre-trained weights
if variant['path_to_weights'] is not None:
itr = variant['showcase_itr']
path = variant['path_to_weights']
for name, net in networks.items():
try:
net.load_state_dict(torch.load(os.path.join(path, name + '_itr_' + str(itr) + '.pth'), map_location='cpu'))
except Exception as e:
print(f'Loading weights for net {name} failed. Skipping.')
print(f'Loaded weights "{variant["path_to_weights"]}"')
if os.path.exists(os.path.join(variant['path_to_weights'], 'stats_dict.json')):
with open(os.path.join(variant['path_to_weights'], 'stats_dict.json'), 'r') as f:
# Copy so not both changed during updates
d = npify_dict(json.load(f))
replay_buffer.stats_dict = d
replay_buffer_augmented.stats_dict = copy.deepcopy(d)
else:
if variant['algo_params']['use_data_normalization']:
raise ValueError('WARNING: No stats dict for replay buffer was found. '
'Stats dict is required for the algorithm to work properly!')
#Agent
agent_class = ScriptedPolicyAgent if variant['env_params']['scripted_policy'] else Agent
agent = agent_class(
encoder,
policy
)
# Rollout Coordinator
rollout_coordinator = RolloutCoordinator(
env,
variant['env_name'],
variant['env_params'],
variant['train_or_showcase'],
agent,
replay_buffer,
variant['algo_params']['batch_size_rollout'],
time_steps,
variant['algo_params']['max_path_length'],
variant['algo_params']['permute_samples'],
variant['algo_params']['encoding_mode'],
variant['util_params']['use_multiprocessing'],
variant['algo_params']['use_data_normalization'],
variant['util_params']['num_workers'],
variant['util_params']['gpu_id'],
variant['env_params']['scripted_policy']
)
reconstruction_trainer = AugmentedTrainer(
encoder,
decoder,
replay_buffer,
None,
variant['algo_params']['batch_size_reconstruction'],
num_classes,
latent_dim,
time_steps,
variant['reconstruction_params']['lr_decoder'],
variant['reconstruction_params']['lr_encoder'],
variant['reconstruction_params']['alpha_kl_z'],
variant['reconstruction_params']['beta_euclid'],
variant['reconstruction_params']['gamma_sparsity'],
variant['reconstruction_params']['regularization_lambda'],
variant['reconstruction_params']['use_state_diff'],
variant['env_params']['state_reconstruction_clip'],
variant['algo_params']['use_data_normalization'],
variant['reconstruction_params']['train_val_percent'],
variant['reconstruction_params']['eval_interval'],
variant['reconstruction_params']['early_stopping_threshold'],
experiment_log_dir,
variant['reconstruction_params']['use_regularization_loss'],
use_PCGrad = variant['PCGrad_params']['use_PCGrad'],
PCGrad_option = variant['PCGrad_params']['PCGrad_option'],
optimizer_class = torch.optim.Adam,
)
# PolicyTrainer
policy_trainer = PolicyTrainer(
policy,
qf1,
qf2,
target_qf1,
target_qf2,
alpha_net,
encoder,
replay_buffer,
replay_buffer_augmented,
variant['algo_params']['batch_size_policy'],
action_dim,
'tree_sampling',
variant['algo_params']['use_data_normalization'],
use_automatic_entropy_tuning=variant['algo_params']['automatic_entropy_tuning'],
target_entropy_factor=variant['algo_params']['target_entropy_factor'],
alpha=variant['algo_params']['sac_alpha'],
use_PCGrad=variant['PCGrad_params']['use_PCGrad'],
PCGrad_option=variant['PCGrad_params']['PCGrad_option']
)
algorithm = TrainingAlgorithm(
replay_buffer,
replay_buffer_augmented,
rollout_coordinator,
reconstruction_trainer,
policy_trainer,
agent,
networks,
train_tasks,
test_tasks,
variant['task_distribution'],
latent_dim,
num_classes,
variant['algo_params']['use_data_normalization'],
variant['algo_params']['num_train_epochs'],
variant['showcase_itr'] if variant['path_to_weights'] is not None else 0,
variant['algo_params']['num_training_steps_reconstruction'],
variant['algo_params']['num_training_steps_policy'],
variant['algo_params']['num_train_tasks_per_episode'],
variant['algo_params']['num_transitions_per_episode'],
variant['algo_params']['augmented_start_percentage'],
variant['algo_params']['augmented_every'],
variant['algo_params']['augmented_rollout_length'],
variant['algo_params']['augmented_rollout_batch_size'],
variant['algo_params']['num_eval_trajectories'],
variant['algo_params']['test_evaluation_every'],
variant['algo_params']['num_showcase'],
experiment_log_dir,
name2number
)
if ptu.gpu_enabled():
algorithm.to()
# debugging triggers a lot of printing and logs to a debug directory
DEBUG = variant['util_params']['debug']
PLOT = variant['util_params']['plot']
os.environ['DEBUG'] = str(int(DEBUG))
os.environ['PLOT'] = str(int(PLOT))
# create temp folder
if not os.path.exists(variant['reconstruction_params']['temp_folder']):
os.makedirs(variant['reconstruction_params']['temp_folder'])
# run the algorithm
if variant['train_or_showcase'] == 'train':
algorithm.train()
algorithm.showcase_task_inference()
elif variant['train_or_showcase'] == 'showcase_all':
algorithm.showcase_all()
elif variant['train_or_showcase'] == 'showcase_task_inference':
algorithm.showcase_task_inference()
elif variant['train_or_showcase'] == 'showcase_non_stationary_env':
algorithm.showcase_non_stationary_env()
def npify_dict(d: dict):
for k, v in d.items():
if type(v) is dict:
d[k] = npify_dict(v)
else:
d[k] = np.asarray(v)
return d
def deep_update_dict(fr, to):
''' update dict of dicts with new values '''
# assume dicts have same keys
for k, v in fr.items():
if type(v) is dict:
deep_update_dict(v, to[k])
else:
to[k] = v
return to
@click.command()
@click.argument('config', default=None)
@click.option('--name', default='')
@click.option('--ti_option', default='')
@click.option('--gpu', default=None)
@click.option('--num_workers', default=None)
@click.option('--use_mp', is_flag=True, default=None)
def click_main(config, name, ti_option, gpu, use_mp, num_workers):
main(config, name, ti_option, gpu, use_mp, num_workers)
def main(config=None, name='', ti_option='', gpu=None, use_mp=None, num_workers=None):
variant = default_config
if config:
with open(os.path.join(config)) as f:
exp_params = json.load(f)
variant = deep_update_dict(exp_params, variant)
# Only set values from input if they are actually inputted
variant['inference_option'] = variant['inference_option'] if ti_option == '' else ti_option
variant['util_params']['exp_name'] = f'{os.path.splitext(os.path.split(config)[1])[0].replace("-", "_") if config is not None else "default"}_' + variant['inference_option'] + (f'_{name}' if name != '' else f'')
variant['util_params']['use_gpu'] = variant['util_params']['use_gpu'] if gpu != '' else False
variant['util_params']['gpu_id'] = variant['util_params']['gpu_id'] if gpu is None else gpu
variant['util_params']['use_multiprocessing'] = variant['util_params']['use_multiprocessing'] if use_mp is None else use_mp
variant['util_params']['num_workers'] = variant['util_params']['num_workers'] if num_workers is None else int(num_workers)
experiment(variant)
if __name__ == "__main__":
click_main()
| [
"torch.manual_seed",
"torch.set_num_threads"
] | 1.7.0 | lknak/tigr | 614a6435c483a25cb8183c08184d140120053a4f |
1.2 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from torch.utils.data import Dataset, DataLoader
import os
import numpy as np
import h5py
import os.path as osp
import sys
import scipy.misc
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class NTUDataset(Dataset):
"""
NTU Skeleton Dataset.
Args:
x (list): Input dataset, each element in the list is an ndarray corresponding to
a joints matrix of a skeleton sequence sample
y (list): Action labels
"""
def __init__(self, x, y):
self.x = x
self.y = np.array(y, dtype='int')
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return [self.x[index], int(self.y[index])]
class NTUDataLoaders(object):
def __init__(self, dataset = 'NTU', case = 1, aug = 0):
self.dataset = dataset
self.case = case
self.aug = aug
self.create_datasets()
self.train_set = NTUDataset(self.train_X, self.train_Y)
self.val_set = NTUDataset(self.val_X, self.val_Y)
self.test_set = NTUDataset(self.test_X, self.test_Y)
self.val_out_set = NTUDataset(self.val_out_X, self.val_Y)
self.test_out_set = NTUDataset(self.test_out_X, self.test_Y)
def get_train_loader(self, batch_size, num_workers):
if self.aug == 1:
return DataLoader(self.train_set, batch_size=batch_size,
shuffle=True, num_workers=num_workers, pin_memory=True) # removed collate function
else:
return DataLoader(self.train_set, batch_size=batch_size,
shuffle=True, num_workers=num_workers,
collate_fn=self.collate_fn, pin_memory=True)
def get_val_loader(self, batch_size, num_workers):
return DataLoader(self.val_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_test_loader(self, batch_size, num_workers):
return DataLoader(self.test_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_val_out_loader(self, batch_size, num_workers):
return DataLoader(self.val_out_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_test_out_loader(self, batch_size, num_workers):
return DataLoader(self.test_out_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def torgb(self, ske_joints):
rgb = []
maxmin = list()
self.idx = 0
for ske_joint in ske_joints:
zero_row = []
if self.dataset == 'NTU':
for i in range(len(ske_joint)):
if (ske_joint[i, :] == np.zeros((1, 150))).all():
zero_row.append(i)
ske_joint = np.delete(ske_joint, zero_row, axis=0)
if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75), axis=1)
elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75, 150), axis=1)
max_val = self.max
min_val = self.min
#### original rescale to 0-255
ske_joint = 255 * (ske_joint - min_val) / (max_val - min_val)
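# Added note: this is a global min-max normalization, mapping the dataset-wide
# minimum joint coordinate to 0 and the maximum to 255 before the sequence is
# reshaped into a (frames, joints, 3) array and resized to a 224x224 RGB image.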
rgb_ske = np.reshape(ske_joint, (ske_joint.shape[0], ske_joint.shape[1] //3, 3))
rgb_ske = scipy.misc.imresize(rgb_ske, (224, 224)).astype(np.float32)
rgb_ske = center(rgb_ske)
rgb_ske = np.transpose(rgb_ske, [1, 0, 2])
rgb_ske = np.transpose(rgb_ske, [2,1,0])
rgb.append(rgb_ske)
maxmin.append([max_val, min_val])
self.idx = self.idx +1
return rgb, maxmin
def compute_max_min(self, ske_joints):
max_vals, min_vals = list(), list()
for ske_joint in ske_joints:
zero_row = []
if self.dataset == 'NTU':
for i in range(len(ske_joint)):
if (ske_joint[i, :] == np.zeros((1, 150))).all():
zero_row.append(i)
ske_joint = np.delete(ske_joint, zero_row, axis=0)
if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75), axis=1)
elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75, 150), axis=1)
max_val = ske_joint.max()
min_val = ske_joint.min()
max_vals.append(float(max_val))
min_vals.append(float(min_val))
max_vals, min_vals = np.array(max_vals), np.array(min_vals)
return max_vals.max(), min_vals.min()
def collate_fn_aug(self,batch):
x, y = zip(*batch)
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
x = _transform(x)
x, maxmin = self.torgb(x.numpy())
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
y = torch.LongTensor(y)
return [x,torch.FloatTensor(maxmin), y]
def collate_fn(self,batch):
x, y = zip(*batch)
x, maxmin = self.torgb(x)
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
y = torch.LongTensor(y)
return [x,torch.FloatTensor(maxmin), y]
def get_train_size(self):
return len(self.train_Y)
def get_val_size(self):
return len(self.val_Y)
def get_test_size(self):
return len(self.test_Y)
def create_datasets(self):
if self.dataset =='NTU':
if self.case == 0:
self.metric = 'CS'
else:
self.metric = 'CV'
path = self.dataset
if 'ntu_splits_val' in path:
f = h5py.File(path, 'r')
self.train_X = f['train_x'][:]
self.train_Y = np.argmax(f['train_y'][:],-1)
self.val_X = f['ztest_x'][:]
self.val_Y = np.argmax(f['ztest_y'][:], -1)
self.test_X = f['val_x'][:]
self.test_Y = np.argmax(f['val_y'][:], -1)
elif 'ntu_splits/ZSL' in path:
f = h5py.File(path, 'r')
self.train_X = f['x'][:]
self.train_Y = np.argmax(f['y'][:],-1)
self.val_X = f['ztest_x'][:]
self.val_Y = np.argmax(f['ztest_y'][:], -1)
self.test_X = f['gtest_x'][:]
self.test_Y = np.argmax(f['gtest_y'][:], -1)
elif 'ntu_results/shift_val' in path:
self.train_X = np.load(path + '/train.npy')
self.train_Y = np.load(path + '/train_label.npy')
self.val_X = np.load(path + '/ztest.npy')
self.val_Y = np.load(path + '/z_label.npy')
self.test_X = np.load(path + '/val.npy')
# print(self.test_X.shape)
self.test_Y = np.load(path + '/val_label.npy')
self.val_out_X = np.load(path + '/ztest_out.npy')
self.test_out_X = np.load(path + '/val_out.npy')
else:
# f = h5py.File(path, 'r')
self.train_X = np.load(path + '/train.npy')
self.train_Y = np.load(path + '/train_label.npy')
self.val_X = np.load(path + '/ztest.npy')
self.val_Y = np.load(path + '/z_label.npy')
self.test_X = np.load(path + '/gtest.npy')
self.test_Y = np.load(path + '/g_label.npy')
self.val_out_X = None
# np.load(path + '/ztest_out.npy')
self.test_out_X = None
# np.load(path + '/gtest_out.npy')
if self.dataset == 'NTU':
self.max = 5.18858098984
self.min = -5.28981208801
else:
x = np.concatenate([self.train_X, self.val_X, self.test_X], 0)
max_val, min_val = self.compute_max_min(x)
self.max = max_val
self.min = min_val
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def center(rgb):
rgb[:,:,0] -= 110
rgb[:,:,1] -= 110
rgb[:,:,2] -= 110
return rgb
def padding(joints, max_len=300, pad_value=0.):
num_frames, feat_dim = joints.shape
if feat_dim == 75:
joints = np.hstack((joints, np.zeros((num_frames, 75), dtype=joints.dtype)))
if num_frames < max_len:
joints = np.vstack(
(joints, np.ones((max_len - num_frames, 150), dtype=joints.dtype) * pad_value))
return joints
def _rot(rot):
cos_r, sin_r = rot.cos(), rot.sin()
zeros = rot.new(rot.size()[:2] + (1,)).zero_()
ones = rot.new(rot.size()[:2] + (1,)).fill_(1)
r1 = torch.stack((ones, zeros, zeros),dim=-1)
rx2 = torch.stack((zeros, cos_r[:,:,0:1], sin_r[:,:,0:1]), dim = -1)
rx3 = torch.stack((zeros, -sin_r[:,:,0:1], cos_r[:,:,0:1]), dim = -1)
rx = torch.cat((r1, rx2, rx3), dim = 2)
ry1 = torch.stack((cos_r[:,:,1:2], zeros, -sin_r[:,:,1:2]), dim =-1)
r2 = torch.stack((zeros, ones, zeros),dim=-1)
ry3 = torch.stack((sin_r[:,:,1:2], zeros, cos_r[:,:,1:2]), dim =-1)
ry = torch.cat((ry1, r2, ry3), dim = 2)
rz1 = torch.stack((cos_r[:,:,2:3], sin_r[:,:,2:3], zeros), dim =-1)
r3 = torch.stack((zeros, zeros, ones),dim=-1)
rz2 = torch.stack((-sin_r[:,:,2:3], cos_r[:,:,2:3],zeros), dim =-1)
rz = torch.cat((rz1, rz2, r3), dim = 2)
rot = rz.matmul(ry).matmul(rx)
return rot
def _transform(x):
x = x.contiguous().view(x.size()[:2] + (-1, 3))
rot = x.new(x.size()[0],3).uniform_(-0.3, 0.3)
rot = rot.repeat(1, x.size()[1])
rot = rot.contiguous().view((-1, x.size()[1], 3))
rot = _rot(rot)
x = torch.transpose(x, 2, 3)
x = torch.matmul(rot, x)
x = torch.transpose(x, 2, 3)
x = x.contiguous().view(x.size()[:2] + (-1,))
return x
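# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal, hedged example of the random-rotation augmentation implemented by
# _rot/_transform above. A dummy batch of 4 skeleton sequences (300 frames,
# 150 joint coordinates) is rotated by a random 3D rotation per sample; the
# tensor layout is preserved. Guarded so it does not run on import.
if __name__ == "__main__":
    _demo_batch = torch.from_numpy(np.zeros((4, 300, 150), dtype=np.float32))
    _augmented = _transform(_demo_batch)
    assert _augmented.shape == _demo_batch.shape  # rotation does not change the tensor shape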
def make_dir(dataset, case, subdir):
if dataset == 'NTU':
output_dir = os.path.join('./models/va-cnn/NTU/')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
return output_dir
def get_cases(dataset):
if dataset[0:3] == 'NTU':
cases = 2
return cases
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def get_num_classes(dataset):
if dataset == 'NTU':
return 60
| [
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.from_numpy",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"torch.transpose",
"torch.matmul"
] | 1.2.0 | skelemoa/synse-zsl | 90f39a118170d708843c5d4305bd807905cb4c54 |
1.2 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from torch.utils.data import Dataset, DataLoader
import os
import numpy as np
import h5py
import os.path as osp
import sys
import scipy.misc
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class NTUDataset(Dataset):
"""
NTU Skeleton Dataset.
Args:
x (list): Input dataset, each element in the list is an ndarray corresponding to
a joints matrix of a skeleton sequence sample
y (list): Action labels
"""
def __init__(self, x, y):
self.x = x
self.y = np.array(y, dtype='int')
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return [self.x[index], int(self.y[index])]
class NTUDataLoaders(object):
def __init__(self, dataset = 'NTU', case = 1, aug = 0):
self.dataset = dataset
self.case = case
self.aug = aug
self.create_datasets()
self.train_set = NTUDataset(self.train_X, self.train_Y)
self.val_set = NTUDataset(self.val_X, self.val_Y)
self.test_set = NTUDataset(self.test_X, self.test_Y)
self.val_out_set = NTUDataset(self.val_out_X, self.val_Y)
self.test_out_set = NTUDataset(self.test_out_X, self.test_Y)
def get_train_loader(self, batch_size, num_workers):
if self.aug == 1:
return DataLoader(self.train_set, batch_size=batch_size,
shuffle=True, num_workers=num_workers, pin_memory=True) # removed collate function
else:
return DataLoader(self.train_set, batch_size=batch_size,
shuffle=True, num_workers=num_workers,
collate_fn=self.collate_fn, pin_memory=True)
def get_val_loader(self, batch_size, num_workers):
return DataLoader(self.val_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_test_loader(self, batch_size, num_workers):
return DataLoader(self.test_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_val_out_loader(self, batch_size, num_workers):
return DataLoader(self.val_out_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def get_test_out_loader(self, batch_size, num_workers):
return DataLoader(self.test_out_set, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def torgb(self, ske_joints):
rgb = []
maxmin = list()
self.idx = 0
for ske_joint in ske_joints:
zero_row = []
if self.dataset == 'NTU':
for i in range(len(ske_joint)):
if (ske_joint[i, :] == np.zeros((1, 150))).all():
zero_row.append(i)
ske_joint = np.delete(ske_joint, zero_row, axis=0)
if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75), axis=1)
elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75, 150), axis=1)
max_val = self.max
min_val = self.min
#### original rescale to 0-255
ske_joint = 255 * (ske_joint - min_val) / (max_val - min_val)
rgb_ske = np.reshape(ske_joint, (ske_joint.shape[0], ske_joint.shape[1] //3, 3))
rgb_ske = scipy.misc.imresize(rgb_ske, (224, 224)).astype(np.float32)
rgb_ske = center(rgb_ske)
rgb_ske = np.transpose(rgb_ske, [1, 0, 2])
rgb_ske = np.transpose(rgb_ske, [2,1,0])
rgb.append(rgb_ske)
maxmin.append([max_val, min_val])
self.idx = self.idx +1
return rgb, maxmin
def compute_max_min(self, ske_joints):
max_vals, min_vals = list(), list()
for ske_joint in ske_joints:
zero_row = []
if self.dataset == 'NTU':
for i in range(len(ske_joint)):
if (ske_joint[i, :] == np.zeros((1, 150))).all():
zero_row.append(i)
ske_joint = np.delete(ske_joint, zero_row, axis=0)
if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75), axis=1)
elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
ske_joint = np.delete(ske_joint, range(75, 150), axis=1)
max_val = ske_joint.max()
min_val = ske_joint.min()
max_vals.append(float(max_val))
min_vals.append(float(min_val))
max_vals, min_vals = np.array(max_vals), np.array(min_vals)
return max_vals.max(), min_vals.min()
def collate_fn_aug(self,batch):
x, y = zip(*batch)
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
x = _transform(x)
x, maxmin = self.torgb(x.numpy())
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
y = torch.LongTensor(y)
return [x,torch.FloatTensor(maxmin), y]
def collate_fn(self,batch):
x, y = zip(*batch)
x, maxmin = self.torgb(x)
x = torch.stack([torch.from_numpy(x[i]) for i in range(len(x))], 0)
y = torch.LongTensor(y)
return [x,torch.FloatTensor(maxmin), y]
def get_train_size(self):
return len(self.train_Y)
def get_val_size(self):
return len(self.val_Y)
def get_test_size(self):
return len(self.test_Y)
def create_datasets(self):
if self.dataset =='NTU':
if self.case == 0:
self.metric = 'CS'
else:
self.metric = 'CV'
path = osp.join('/ssd_scratch/cvit/pranay.gupta/', self.dataset)
if 'ntu_splits_val' in path:
f = h5py.File(path, 'r')
self.train_X = f['train_x'][:]
self.train_Y = np.argmax(f['train_y'][:],-1)
self.val_X = f['ztest_x'][:]
self.val_Y = np.argmax(f['ztest_y'][:], -1)
self.test_X = f['val_x'][:]
self.test_Y = np.argmax(f['val_y'][:], -1)
elif 'ntu_splits/ZSL' in path:
f = h5py.File(path, 'r')
self.train_X = f['x'][:]
self.train_Y = np.argmax(f['y'][:],-1)
self.val_X = f['ztest_x'][:]
self.val_Y = np.argmax(f['ztest_y'][:], -1)
self.test_X = f['gtest_x'][:]
self.test_Y = np.argmax(f['gtest_y'][:], -1)
elif 'ntu_results/shift_val' in path:
self.train_X = np.load(path + '/train.npy')
self.train_Y = np.load(path + '/train_label.npy')
self.val_X = np.load(path + '/ztest.npy')
self.val_Y = np.load(path + '/z_label.npy')
self.test_X = np.load(path + '/val.npy')
self.test_Y = np.load(path + '/val_label.npy')
self.val_out_X = np.load(path + '/ztest_out.npy')
self.test_out_X = np.load(path + '/val_out.npy')
else:
# f = h5py.File(path, 'r')
self.train_X = np.load(path + '/train.npy')
self.train_Y = np.load(path + '/train_label.npy')
self.val_X = np.load(path + '/ztest.npy')
self.val_Y = np.load(path + '/z_label.npy')
self.test_X = np.load(path + '/gtest.npy')
self.test_Y = np.load(path + '/g_label.npy')
self.val_out_X = None
#np.load(path + '/ztest_out.npy')
self.test_out_X = None
#np.load(path + '/gtest_out.npy')
if self.dataset == 'NTU':
self.max = 5.18858098984
self.min = -5.28981208801
else:
x = np.concatenate([self.train_X, self.val_X, self.test_X], 0)
max_val, min_val = self.compute_max_min(x)
self.max = max_val
self.min = min_val
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def center(rgb):
rgb[:,:,0] -= 110
rgb[:,:,1] -= 110
rgb[:,:,2] -= 110
return rgb
def padding(joints, max_len=300, pad_value=0.):
num_frames, feat_dim = joints.shape
if feat_dim == 75:
joints = np.hstack((joints, np.zeros((num_frames, 75), dtype=joints.dtype)))
if num_frames < max_len:
joints = np.vstack(
(joints, np.ones((max_len - num_frames, 150), dtype=joints.dtype) * pad_value))
return joints
def _rot(rot):
cos_r, sin_r = rot.cos(), rot.sin()
zeros = rot.new(rot.size()[:2] + (1,)).zero_()
ones = rot.new(rot.size()[:2] + (1,)).fill_(1)
r1 = torch.stack((ones, zeros, zeros),dim=-1)
rx2 = torch.stack((zeros, cos_r[:,:,0:1], sin_r[:,:,0:1]), dim = -1)
rx3 = torch.stack((zeros, -sin_r[:,:,0:1], cos_r[:,:,0:1]), dim = -1)
rx = torch.cat((r1, rx2, rx3), dim = 2)
ry1 = torch.stack((cos_r[:,:,1:2], zeros, -sin_r[:,:,1:2]), dim =-1)
r2 = torch.stack((zeros, ones, zeros),dim=-1)
ry3 = torch.stack((sin_r[:,:,1:2], zeros, cos_r[:,:,1:2]), dim =-1)
ry = torch.cat((ry1, r2, ry3), dim = 2)
rz1 = torch.stack((cos_r[:,:,2:3], sin_r[:,:,2:3], zeros), dim =-1)
r3 = torch.stack((zeros, zeros, ones),dim=-1)
rz2 = torch.stack((-sin_r[:,:,2:3], cos_r[:,:,2:3],zeros), dim =-1)
rz = torch.cat((rz1, rz2, r3), dim = 2)
rot = rz.matmul(ry).matmul(rx)
return rot
def _transform(x):
x = x.contiguous().view(x.size()[:2] + (-1, 3))
rot = x.new(x.size()[0],3).uniform_(-0.3, 0.3)
rot = rot.repeat(1, x.size()[1])
rot = rot.contiguous().view((-1, x.size()[1], 3))
rot = _rot(rot)
x = torch.transpose(x, 2, 3)
x = torch.matmul(rot, x)
x = torch.transpose(x, 2, 3)
x = x.contiguous().view(x.size()[:2] + (-1,))
return x
def make_dir(dataset, case, subdir):
if dataset == 'NTU':
output_dir = os.path.join('./models/va-cnn/NTU/')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
return output_dir
def get_cases(dataset):
if dataset[0:3] == 'NTU':
cases = 2
return cases
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def get_num_classes(dataset):
if dataset == 'NTU':
return 60
| [
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.from_numpy",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"torch.transpose",
"torch.matmul"
] | 1.2.0 | skelemoa/synse-zsl | 90f39a118170d708843c5d4305bd807905cb4c54 |
1.9 | from functools import reduce
from math import sqrt
from typing import Any, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from ai_traineree.networks import NetworkType
from ai_traineree.types import FeatureType
def hidden_init(layer: nn.Module):
fan_in = layer.weight.data.size()[0] # type: ignore
lim = 1. / sqrt(fan_in)
return (-lim, lim)
def layer_init(layer: nn.Module, range_value: Optional[Tuple[float, float]]=None, remove_mean=True):
if not (isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear)):
return
if range_value is not None:
layer.weight.data.uniform_(*range_value) # type: ignore
if remove_mean:
layer.weight.data -= layer.weight.data.mean()
nn.init.xavier_uniform_(layer.weight)
class ScaleNet(NetworkType):
def __init__(self, scale: Union[float, int]) -> None:
super(ScaleNet, self).__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class ConvNet(NetworkType):
def __init__(self, input_dim: Sequence[int], **kwargs):
"""Convolution Network.
Constructs a layered network over torch.nn.Conv2D. Number of layers is set based on `hidden_layers` argument.
To update other arguments, e.g. kernel_size or bias, pass either a single value or a tuple of the same
length as `hidden_layers`.
Quick reminder from the PyTorch doc (https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html).
Keyword Arguments:
in_channels (int): Number of channels in the input image
hidden_layers (tuple of ints): Number of channels in each hidden layer
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
padding_mode (string, optional): 'zeros', 'reflect', 'replicate' or 'circular'. Default: 'zeros'
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
Examples:
>>> config = {"hidden_layers": (300, 200, 100), "kernel_size": 6, "gate": F.relu}
>>> net = ConvNet(input_dim=(10, 10, 3), **config)
>>> config = {"hidden_layers": (64, 32, 64), "kernel_size": (3, 4, 3), padding: 2, "gate": F.relu}
>>> net = ConvNet(input_dim=(20, 10, 1), **config)
"""
super(ConvNet, self).__init__()
# input_dim = (num_layers, x_img, y_img, channels)
hidden_layers = kwargs.get("hidden_layers", (20, 20))
num_layers = [input_dim[0]] + list(hidden_layers)
gate = kwargs.get("gate", nn.ReLU)
max_pool_sizes = self._expand_to_seq(kwargs.get("max_pool_size", 2), len(hidden_layers))
kernel_sizes = self._expand_to_seq(kwargs.get("kernel_size", 3), len(hidden_layers))
strides = self._expand_to_seq(kwargs.get("stride", 1), len(hidden_layers))
paddings = self._expand_to_seq(kwargs.get("padding", 0), len(hidden_layers))
dilations = self._expand_to_seq(kwargs.get("dilation", 1), len(hidden_layers))
biases = self._expand_to_seq(kwargs.get('bias', True), len(hidden_layers))
layers = []
for layer_idx in range(len(hidden_layers)):
layers.append(
nn.Conv2d(
num_layers[layer_idx], num_layers[layer_idx+1],
kernel_size=kernel_sizes[layer_idx],
stride=strides[layer_idx],
padding=paddings[layer_idx],
dilation=dilations[layer_idx],
bias=biases[layer_idx],
)
)
if max_pool_sizes[layer_idx] > 1:
layers.append(nn.MaxPool2d(max_pool_sizes[layer_idx]))
if gate is not None:
layers.append(gate())
self.layers = nn.ModuleList(layers)
self.reset_parameters()
self.input_dim = input_dim
self.device = kwargs.get("device")
self.to(self.device)
@staticmethod
def _expand_to_seq(o: Union[Any, Sequence[Any]], size) -> Sequence[Any]:
return o if isinstance(o, Sequence) else (o,)*size
@property
def output_size(self):
return reduce(lambda a, b: a*b, self._calculate_output_size(self.input_dim, self.layers))
@torch.no_grad()
def _calculate_output_size(self, input_dim: Sequence[int], layers) -> Sequence[int]:
test_tensor = torch.zeros((1,) + tuple(input_dim)).to(self.device)
out = self.forward(test_tensor)
return out.shape
def reset_parameters(self):
self.layers.apply(layer_init)
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class FcNet(NetworkType):
"""
    For the activation layer we use tanh by default, which was observed to be much better than e.g. ReLU
for policy networks [1]. The last gate, however, might be changed depending on the actual task.
References
.. [1] "What Matters In On-Policy Reinforcement Learning? A Large-Scale Empirical Study"
by M. Andrychowicz et al. (2020). Link: https://arxiv.org/abs/2006.05990
"""
def __init__(
self,
in_features: FeatureType,
out_features: FeatureType,
hidden_layers: Optional[Sequence[int]]=(200, 100),
last_layer_range=(-3e-4, 3e-4),
bias: bool=True,
**kwargs
):
"""Fully Connected network with default APIs.
Parameters:
in_features (sequence of ints): Shape of the input.
out_features (sequence of ints): Shape of the output.
hidden_layers: Shape of the hidden layers. If None, then the output is directly computed from the input.
last_layer_range: The range for the uniform distribution that initiates the last layer.
Keyword arguments:
            gate (optional torch.nn.layer): Activation function for each layer, except the last. Default: torch.tanh.
            gate_out (optional torch.nn.layer): Activation function after the last layer. Default: Identity layer.
            device (torch.device or str): Device where to allocate memory. CPU or CUDA.
"""
super(FcNet, self).__init__()
assert len(in_features) == 1, "Expected only one dimension"
assert len(out_features) == 1, "Expected only one dimension"
self.in_features = tuple(in_features)
self.out_features = tuple(out_features)
num_layers = tuple(hidden_layers) if hidden_layers is not None else tuple()
num_layers = self.in_features + num_layers + self.out_features
layers = [nn.Linear(dim_in, dim_out, bias=bias) for dim_in, dim_out in zip(num_layers[:-1], num_layers[1:])]
self.last_layer_range = last_layer_range
self.layers = nn.ModuleList(layers)
self.reset_parameters()
self.gate = kwargs.get("gate", torch.tanh)
self.gate_out = kwargs.get("gate_out", nn.Identity())
self.to(kwargs.get("device"))
def reset_parameters(self):
for layer in self.layers[:-1]:
layer_init(layer, hidden_init(layer), remove_mean=True)
layer_init(self.layers[-1], self.last_layer_range, remove_mean=True)
def forward(self, x):
for layer in self.layers[:-1]:
x = self.gate(layer(x))
return self.gate_out(self.layers[-1](x))
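# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of FcNet: a (4,) -> (200, 100) -> (2,) network with the
# default tanh gate. Feature shapes are one-dimensional tuples, as the asserts above require.
if __name__ == "__main__":
    _fc_demo = FcNet(in_features=(4,), out_features=(2,), hidden_layers=(200, 100))
    _fc_out = _fc_demo(torch.zeros(8, 4))   # a dummy batch of 8 observations
    assert _fc_out.shape == (8, 2)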
###
# In most cases, the default ActorBody can be associated with a fully connected network.
# The alias is only for convenience and, hopefully, better understanding of some algorithms.
###
ActorBody = FcNet
class CriticBody(NetworkType):
"""Extension of the FcNet which includes actions.
Mainly used to estimate the state-action value function in actor-critic agents.
Actions are included (by default) in the first hidden layer (changeable).
Since the main purpose for this is value function estimation the output is a single value.
"""
def __init__(
self,
in_features: FeatureType,
inj_action_size: int,
out_features: FeatureType = (1,),
hidden_layers: Optional[Sequence[int]]=(100, 100),
inj_actions_layer: int=1,
**kwargs
):
"""
Parameters:
in_features (tuple of ints): Dimension of the input features.
inj_action_size (int): Dimension of the action vector that is injected into `inj_action_layer`.
out_features (tuple of ints): Dimension of critic's action. Default: (1,).
hidden_layers (tuple of ints): Shape of the hidden layers. Default: (100, 100).
            inj_actions_layer (int): An index for the layer that will have `actions` injected as an additional input.
By default that's a first hidden layer, i.e. (state) -> (out + actions) -> (out) ... -> (output).
Default: 1.
Keyword arguments:
bias (bool): Whether to include bias in network's architecture. Default: True.
            gate (callable): Activation function for each layer, except the last. Default: Identity layer.
gate_out (callable): Activation function after the last layer. Default: Identity layer.
device: Device where to allocate memory. CPU or CUDA. Default CUDA if available.
"""
super().__init__()
self.in_features = tuple(in_features)
self.out_features = tuple(out_features)
num_layers = tuple(hidden_layers) if hidden_layers is not None else tuple()
num_layers = self.in_features + num_layers + self.out_features
self.actions_layer = inj_actions_layer
if not (0 <= inj_actions_layer < len(num_layers)):
raise ValueError("Action layer needs to be within the network")
bias = bool(kwargs.get('bias', True))
layers = []
for in_idx in range(len(num_layers)-1):
in_dim, out_dim = num_layers[in_idx], num_layers[in_idx+1]
if in_idx == inj_actions_layer: # Injects `actions` into specified (default: 2nd) layer of the Critic
in_dim += inj_action_size
layers.append(nn.Linear(in_dim, out_dim, bias=bias))
self.layers = nn.ModuleList(layers)
self.reset_parameters()
self.gate = kwargs.get('gate', nn.Identity())
self.gate_out = kwargs.get('gate_out', nn.Identity())
self.to(kwargs.get("device"))
def reset_parameters(self):
for layer in self.layers:
layer_init(layer, hidden_init(layer))
def forward(self, x, actions):
for idx, layer in enumerate(self.layers):
if idx == self.actions_layer:
x = layer(torch.cat((x, actions.float()), dim=-1))
else:
x = layer(x)
if idx < len(self.layers) - 1:
x = self.gate(x)
else:
x = self.gate_out(x)
return x
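# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of CriticBody: state features of size 6 and a 2-dimensional action
# injected into the first hidden layer (the default), producing a single value per sample.
if __name__ == "__main__":
    _critic_demo = CriticBody(in_features=(6,), inj_action_size=2, hidden_layers=(64, 64))
    _q_value = _critic_demo(torch.zeros(10, 6), torch.zeros(10, 2))   # (states, actions)
    assert _q_value.shape == (10, 1)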
class NoisyLayer(nn.Module):
def __init__(self, in_features: FeatureType, out_features: FeatureType, sigma: float=0.4, factorised: bool=True):
"""
A linear layer with added noise perturbations in training as described in [1].
For a fully connected network of NoisyLayers see :class:`.NoisyNet`.
Parameters:
in_features (tuple ints): Dimension of the input.
out_features (tuple ints): Dimension of the output.
            sigma (float): Used to initiate the noise distribution. Default: 0.4.
            factorised: Whether to use independent Gaussian (False) or Factorised Gaussian (True) noise.
                Factorised noise is suggested in [1] for DQN and Dueling nets as it is quicker.
References:
.. [1] "Noisy Networks for Exploration" by Fortunato et al. (ICLR 2018), https://arxiv.org/abs/1706.10295.
"""
super(NoisyLayer, self).__init__()
assert len(in_features) == 1, "Expected only one dimension"
assert len(out_features) == 1, "Expected only one dimension"
self.in_features = in_features
self.out_features = out_features
self.sigma_0 = sigma
self.factorised = factorised
self.weight_mu = nn.Parameter(torch.zeros((out_features[0], in_features[0])))
self.weight_sigma = nn.Parameter(torch.zeros((out_features[0], in_features[0])))
self.bias_mu = nn.Parameter(torch.zeros(out_features[0]))
self.bias_sigma = nn.Parameter(torch.zeros(out_features[0]))
self.register_buffer('weight_eps', torch.zeros((out_features[0], in_features[0])))
self.register_buffer('bias_eps', torch.zeros(out_features[0]))
self.bias_noise = torch.zeros(out_features[0])
if factorised:
self.weight_noise = torch.zeros(in_features[0])
else:
self.weight_noise = torch.zeros(out_features[0], in_features[0])
self.reset_parameters()
self.reset_noise()
def forward(self, x) -> torch.Tensor:
weight = self.weight_mu
bias = self.bias_mu
if self.training:
weight = weight.add(self.weight_sigma.mul(self.weight_eps))
bias = bias.add(self.bias_sigma.mul(self.bias_eps))
return F.linear(x, weight, bias)
def reset_parameters(self) -> None:
if self.factorised:
bound = sqrt(1./self.in_features[0])
sigma = self.sigma_0 * bound
else:
bound = sqrt(3./self.in_features[0])
sigma = 0.017 # Yes, that's correct. [1]
self.weight_mu.data.uniform_(-bound, bound)
self.weight_sigma.data.fill_(sigma)
self.bias_mu.data.uniform_(-bound, bound)
self.bias_sigma.data.fill_(sigma)
def reset_noise(self):
self.bias_noise.normal_(std=self.sigma_0)
self.weight_noise.normal_(std=self.sigma_0)
if self.factorised:
# eps_i = ~P(b_size), eps_j = ~P(w_size)
# eps_b = f(eps_i)
# eps_w = f(eps_i) x f(eps_j)
f_weight_eps = self.noise_function(self.weight_noise)
f_bias_eps = self.noise_function(self.bias_noise)
self.weight_eps.copy_(f_bias_eps.outer(f_weight_eps))
self.bias_eps.copy_(f_bias_eps)
else:
self.weight_eps.copy_(self.weight_noise.data)
self.bias_eps.copy_(self.bias_noise.data)
@staticmethod
def noise_function(x):
return x.sign().mul_(x.abs().sqrt())
class NoisyNet(NetworkType):
def __init__(
self, in_features: FeatureType, out_features: FeatureType,
hidden_layers: Optional[Sequence[int]]=(100, 100), sigma=0.4,
factorised=True, **kwargs,
):
"""
Parameters:
in_features (tuple ints): Dimension of the input.
out_features (tuple ints): Dimension of the output.
hidden_layers (sequence ints): Sizes of latent layers. Size of sequence denotes number of hidden layers and
values of the sequence are nodes per layer. If None is passed then the input goes straight to output.
Default: (100, 100).
sigma (float): Variance value for generating noise in noisy layers. Default: 0.4 per layer.
            factorised (bool): Whether to use independent Gaussian (False) or Factorised Gaussian (True) noise.
                Factorised noise is suggested in [1] for DQN and Dueling nets as it is quicker.
Keyword arguments:
gate (callable): Function to apply after each layer pass. For the best performance it is suggested
to use non-linear functions such as tanh. Default: tanh.
gate_out (callable): Function to apply on network's exit. Default: identity.
device (str or torch.device): Whether and where to cast the network. Default is CUDA if available else cpu.
References:
.. [1] "Noisy Networks for Exploration" by Fortunato et al. (ICLR 2018), https://arxiv.org/abs/1706.10295.
"""
super(NoisyNet, self).__init__()
assert len(in_features) == 1, "Expected only one dimension"
assert len(out_features) == 1, "Expected only one dimension"
self.in_features = in_features
self.out_features = out_features
num_layers = list(hidden_layers) if hidden_layers is not None else []
num_layers = [self.in_features[0]] + num_layers + [self.out_features[0]]
layers = [NoisyLayer((dim_in,), (dim_out,), sigma=sigma, factorised=factorised) for dim_in, dim_out in zip(num_layers[:-1], num_layers[1:])]
self.layers = nn.ModuleList(layers)
self.gate = kwargs.get("gate", torch.tanh)
self.gate_out = kwargs.get("gate_out", nn.Identity())
if not callable(self.gate) or not callable(self.gate_out):
raise ValueError("Passed gate or gate_out is no callable and cannot be used as a function")
self.to(device=kwargs.get('device', None))
def reset_noise(self) -> None:
for layer in self.layers:
layer.reset_noise()
def reset_parameters(self) -> None:
for layer in self.layers:
layer.reset_parameters()
def forward(self, x) -> torch.Tensor:
for layer in self.layers[:-1]:
x = self.gate(layer(x))
return self.gate_out(self.layers[-1](x))
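# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of NoisyNet: in training mode the layers perturb their weights with
# factorised Gaussian noise, and reset_noise() draws a fresh perturbation (typically
# once per optimisation step in a NoisyNet-DQN style agent).
if __name__ == "__main__":
    _noisy_demo = NoisyNet(in_features=(8,), out_features=(4,), hidden_layers=(64,))
    _q_values = _noisy_demo(torch.zeros(5, 8))
    assert _q_values.shape == (5, 4)
    _noisy_demo.reset_noise()   # resample the noise for the next forward passes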
| [
"torch.zeros",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.linear",
"torch.nn.Conv2d"
] | 1.9.0 | laszukdawid/ai-traineree | af32940eba8e11012de87b60d78f10f5a3b96c79 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as f
class DoubleConvolution(nn.Module):
"""
    Class used to initialize the conv 3x3, ReLU step.
"""
def __init__(self, in_channels: int, out_channels: int, mid_channels: int = None):
"""
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels
mid_channels : int
            Number of mid-layer channels
"""
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
"""
        Performs and returns results from the conv 3x3, ReLU step.
Parameters
----------
x : torch.tensor
Input data
"""
return self.double_conv(x)
class Down(nn.Module):
"""
Class used to initialize the max pool 2x2 step.
"""
def __init__(self, in_channels: int, out_channels: int):
"""
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels
"""
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2), DoubleConvolution(in_channels, out_channels)
)
def forward(self, x):
"""
        Performs and returns results from the max pool 2x2 step.
Parameters
----------
x : torch.tensor
Input data
"""
return self.maxpool_conv(x)
class Up(nn.Module):
"""
Class used to initialize the up-conv 2x2 step.
"""
def __init__(self, in_channels: int, out_channels: int, bilinear: bool = True):
        """
        Parameters
        ----------
        in_channels : int
            Number of input channels
        out_channels : int
            Number of output channels
        bilinear : bool
            Bilinear interpolation in upsampling (default)
        """
        super().__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
self.conv = DoubleConvolution(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(
in_channels, in_channels // 2, kernel_size=2, stride=2
)
self.conv = DoubleConvolution(in_channels, out_channels)
def forward(self, x1, x2):
"""
        Performs and returns results from the up-conv 2x2 step.
Parameters
----------
        x1 : torch.tensor
            Lower-resolution feature map that is upsampled
        x2 : torch.tensor
            Skip-connection feature map that x1 is concatenated with
"""
x1 = self.up(x1)
diff_y = x2.size()[2] - x1.size()[2]
diff_x = x2.size()[3] - x1.size()[3]
x1 = f.pad(
x1, [diff_x // 2, diff_x - diff_x // 2, diff_y // 2, diff_y - diff_y // 2]
)
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConvolution(nn.Module):
"""
Class used to initialize the conv 1x1 step.
"""
def __init__(self, in_channels: int, out_channels: int):
"""
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels
"""
super(OutConvolution, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
"""
        Performs and returns results from the conv 1x1 step.
Parameters
----------
x : torch.tensor
Input data
"""
return self.conv(x)
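# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged, minimal composition of the blocks above into a tiny two-level U-Net:
# double conv 3x3 + ReLU, max pool 2x2 down, up-conv 2x2 with a skip connection,
# and a conv 1x1 output head. The channel sizes are illustrative assumptions only.
class _TinyUNetSketch(nn.Module):
    def __init__(self, n_channels: int = 3, n_classes: int = 2):
        super().__init__()
        self.inc = DoubleConvolution(n_channels, 16)
        self.down1 = Down(16, 32)
        self.up1 = Up(32 + 16, 16, bilinear=True)   # concatenated skip channels in, 16 out
        self.outc = OutConvolution(16, n_classes)

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        return self.outc(self.up1(x2, x1))

if __name__ == "__main__":
    _logits = _TinyUNetSketch()(torch.rand(1, 3, 64, 64))
    assert _logits.shape == (1, 2, 64, 64)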
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.functional.pad"
] | 1.7.0 | gil-uav/semantic-image-segmentation | eaf29cda77f67e432756c3f594f3bf035e9c05c4 |
1.9 |
import torch.nn as nn
class FashionMNISTCNN(nn.Module):
def __init__(self):
super(FashionMNISTCNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
self.flatten = nn.Flatten()
self.fc = nn.Linear(7 * 7 * 32, 10)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.fc(self.flatten(x))
        return x
 | [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Flatten"
] | 1.9.0 | ahreurink/fltk-testbed | f36581cb4a36e7d6c4d9c87618be67a77aeef13b |
1.6 | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
import numpy as np
import torch, math
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
import utils.inference.util as util
class MultiscaleDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--netD_subarch', type=str, default='n_layer',
help='architecture of each discriminator')
parser.add_argument('--num_D', type=int, default=2,
help='number of discriminators to be used in multiscale')
opt, _ = parser.parse_known_args()
# define properties of each discriminator of the multiscale discriminator
subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',
'models.networks.discriminator')
subnetD.modify_commandline_options(parser, is_train)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
for i in range(opt.num_D):
subnetD = self.create_single_discriminator(opt)
self.add_module('discriminator_%d' % i, subnetD)
def create_single_discriminator(self, opt):
subarch = opt.netD_subarch
if subarch == 'n_layer':
netD = NLayerDiscriminator(opt)
else:
raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
return netD
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
get_intermediate_features = not self.opt.no_ganFeat_loss
for name, D in self.named_children():
out = D(input)
if not get_intermediate_features:
out = [out]
result.append(out)
input = self.downsample(input)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--n_layers_D', type=int, default=4,
help='# layers in each discriminator')
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = opt.ndf
input_nc = self.compute_D_input_nc(opt)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, False)]]
for n in range(1, opt.n_layers_D):
nf_prev = nf
nf = min(nf * 2, 512)
stride = 1 if n == opt.n_layers_D - 1 else 2
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
stride=stride, padding=padw)),
nn.LeakyReLU(0.2, False)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
def compute_D_input_nc(self, opt):
input_nc = opt.label_nc + opt.output_nc
if opt.contain_dontcare_label:
input_nc += 1
if not opt.no_instance:
input_nc += 1
return input_nc
def forward(self, input):
results = [input]
for submodel in self.children():
intermediate_output = submodel(results[-1])
results.append(intermediate_output)
get_intermediate_features = not self.opt.no_ganFeat_loss
if get_intermediate_features:
return results[1:]
else:
return results[-1]
class ScaledLeakyReLU(nn.Module):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def forward(self, input):
out = F.leaky_relu(input, negative_slope=self.negative_slope)
return out * math.sqrt(2)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
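# --- Illustrative note (added; not part of the original file) ---
# make_kernel turns a 1D tap list into a normalised 2D blur kernel, e.g.
# [1, 3, 3, 1] -> outer product -> a 4x4 kernel whose entries sum to 1.
# The Blur and ConvLayer classes below assume that `upfirdn2d` and `FusedLeakyReLU`
# (the usual StyleGAN2 fused ops) are imported elsewhere in this package; those
# imports are not shown in this excerpt.
if __name__ == "__main__":
    _k = make_kernel([1, 3, 3, 1])
    assert _k.shape == (4, 4) and abs(float(_k.sum()) - 1.0) < 1e-6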
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * (upsample_factor ** 2)
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class EqualConv2d(nn.Module):
def __init__(
self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
):
super().__init__()
self.weight = nn.Parameter(
torch.randn(out_channel, in_channel, kernel_size, kernel_size)
)
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def forward(self, input):
out = F.conv2d(
input,
self.weight * self.scale,
bias=self.bias,
stride=self.stride,
padding=self.padding,
)
return out
class ConvLayer(nn.Sequential):
def __init__(self, in_channel, out_channel, kernel_size,
downsample=False, blur_kernel=[1, 3, 3, 1],
bias=True, activate=True):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
layers.append(
EqualConv2d(in_channel, out_channel, kernel_size,
padding=self.padding, stride=stride, bias=bias and not activate)
)
if activate:
if bias:
layers.append(FusedLeakyReLU(out_channel))
else:
layers.append(ScaledLeakyReLU(0.2))
super().__init__(*layers)
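# --- Worked example (added for illustration; the numbers are assumptions) ---
# For a downsampling ConvLayer with kernel_size=3 and the default blur_kernel=[1, 3, 3, 1]:
#   p    = (len(blur_kernel) - factor) + (kernel_size - 1) = (4 - 2) + (3 - 1) = 4
#   pad0 = (p + 1) // 2 = 2,   pad1 = p // 2 = 2
# so Blur pre-pads the feature map by (2, 2) before the stride-2 EqualConv2d,
# keeping the output exactly half the input resolution for even-sized inputs.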
| [
"torch.zeros",
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.functional.conv2d",
"torch.randn",
"torch.nn.functional.leaky_relu"
] | 1.6.0 | ustato/sber-swap | 1140e085e165ed14e1098d81b7abd63feafedecf |
1.11 | import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.distributions import Categorical
def entropy(probs):
log_probs = -torch.log(probs)
    entropy = torch.sum(probs * log_probs, axis=-1, keepdim=True)  # log_probs is already -log(p), so H(p) = -sum(p * log p) >= 0
return entropy
class DenseDirichlet(nn.Module):
def __init__(self, in_dim, out_dim):
super(DenseDirichlet, self).__init__()
self.in_dim = int(in_dim)
self.out_features = int(out_dim)
self.dense = nn.Linear(self.in_dim, self.out_features)
def forward(self, x):
output = self.dense(x)
evidence = torch.exp(output)
alpha = evidence + 1
S = torch.unsqueeze(torch.sum(alpha, dim=1), -1)
K = alpha.shape[-1]
prob = alpha / S
epistemic_uncertainty = K / S
aleatoric_uncertainty = entropy(prob)
return alpha, prob, epistemic_uncertainty, aleatoric_uncertainty
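# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of the evidential head above: logits -> evidence = exp(logits),
# alpha = evidence + 1, p = alpha / sum(alpha). The epistemic uncertainty K / sum(alpha)
# shrinks as the total evidence grows; the aleatoric uncertainty is the entropy of p.
if __name__ == "__main__":
    _head = DenseDirichlet(in_dim=16, out_dim=3)
    _alpha, _p, _epistemic, _aleatoric = _head(torch.rand(4, 16))
    assert _alpha.shape == (4, 3) and _p.shape == (4, 3)
    assert _epistemic.shape == (4, 1) and _aleatoric.shape == (4, 1)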
class DenseSigmoid(nn.Module):
def __init__(self, in_dim, out_dim):
super(DenseSigmoid, self).__init__()
self.in_dim = int(in_dim)
self.out_dim = int(out_dim)
self.dense = nn.Linear(self.in_dim, self.out_dim)
def forward(self, x):
logits = self.dense(x)
prob = F.sigmoid(logits)
return [logits, prob]
| [
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.log",
"torch.exp",
"torch.sum"
] | 1.11.0 | Tuttusa/EvidentialDL | 7813c2705784bfeee21d25643259fd28d75b5f95 |
1.8 | import datetime
import logging
import os
import torch
from ..base.base_sampler import BaseSampler
from .rhvae_config import RHVAESamplerConfig
from .rhvae_model import RHVAE
logger = logging.getLogger(__name__)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logger.setLevel(logging.INFO)
class RHVAESampler(BaseSampler):
"""Hamiltonian Monte Carlo Sampler class.
This is an implementation of the Hamiltonian/Hybrid Monte Carlo sampler
(https://en.wikipedia.org/wiki/Hamiltonian_Monte_Carlo)
Args:
model (RHVAE): The VAE model to sample from
sampler_config (RHVAESamplerConfig): A HMCSamplerConfig instance containing the main
parameters of the sampler. If None, a pre-defined configuration is used. Default: None
"""
def __init__(self, model: RHVAE, sampler_config: RHVAESamplerConfig = None):
BaseSampler.__init__(self, model=model, sampler_config=sampler_config)
self.sampler_config = sampler_config
self.model.M_tens = self.model.M_tens.to(self.device)
self.model.centroids_tens = self.model.centroids_tens.to(self.device)
self.mcmc_steps_nbr = sampler_config.mcmc_steps_nbr
self.n_lf = torch.tensor([sampler_config.n_lf]).to(self.device)
self.eps_lf = torch.tensor([sampler_config.eps_lf]).to(self.device)
self.beta_zero_sqrt = (
torch.tensor([sampler_config.beta_zero]).to(self.device).sqrt()
)
self.log_pi = RHVAESampler.log_sqrt_det_G_inv
self.grad_func = RHVAESampler.grad_log_prop
def sample(self, samples_number):
"""
HMC sampling with a RHVAE.
The data is saved in the ``output_dir`` (folder passed in the
:class:`~pyraug.models.base.base_config.BaseSamplerConfig` instance) in a folder named
``generation_YYYY-MM-DD_hh-mm-ss``. If ``output_dir`` is None, a folder named
``dummy_output_dir`` is created in this folder.
Args:
            samples_number (int): The number of samples to generate
"""
assert samples_number > 0, "Provide a number of samples > 0"
self._sampling_signature = (
str(datetime.datetime.now())[0:19].replace(" ", "_").replace(":", "-")
)
sampling_dir = os.path.join(
self.sampler_config.output_dir, f"generation_{self._sampling_signature}"
)
if not os.path.exists(sampling_dir):
os.makedirs(sampling_dir)
logger.info(
f"Created {sampling_dir}. "
"Generated data and sampler config will be saved here.\n"
)
full_batch_nbr = int(samples_number / self.sampler_config.batch_size)
last_batch_samples_nbr = samples_number % self.sampler_config.batch_size
generated_data = []
file_count = 0
data_count = 0
logger.info("Generation successfully launched !\n")
for i in range(full_batch_nbr):
samples = self.hmc_sampling(self.batch_size)
x_gen = self.model.decoder(z=samples).detach()
assert len(x_gen.shape) == 2
generated_data.append(x_gen)
data_count += self.batch_size
while data_count >= self.samples_per_save:
self.save_data_batch(
data=torch.cat(generated_data)[: self.samples_per_save],
dir_path=sampling_dir,
number_of_samples=self.samples_per_save,
batch_idx=file_count,
)
file_count += 1
data_count -= self.samples_per_save
generated_data = list(
torch.cat(generated_data)[self.samples_per_save :].unsqueeze(0)
)
if last_batch_samples_nbr > 0:
samples = self.hmc_sampling(last_batch_samples_nbr)
x_gen = self.model.decoder(z=samples).detach()
generated_data.append(x_gen)
data_count += last_batch_samples_nbr
while data_count >= self.samples_per_save:
self.save_data_batch(
data=torch.cat(generated_data)[: self.samples_per_save],
dir_path=sampling_dir,
number_of_samples=self.samples_per_save,
batch_idx=file_count,
)
file_count += 1
data_count -= self.samples_per_save
generated_data = list(
torch.cat(generated_data)[self.samples_per_save :].unsqueeze(0)
)
if data_count > 0:
self.save_data_batch(
data=torch.cat(generated_data),
dir_path=sampling_dir,
number_of_samples=data_count,
batch_idx=file_count,
)
self.save(sampling_dir)
def hmc_sampling(self, n_samples):
with torch.no_grad():
idx = torch.randint(len(self.model.centroids_tens), (n_samples,))
z0 = self.model.centroids_tens[idx]
beta_sqrt_old = self.beta_zero_sqrt
z = z0
for i in range(self.mcmc_steps_nbr):
gamma = torch.randn_like(z, device=self.device)
rho = gamma / self.beta_zero_sqrt
H0 = -self.log_pi(z, self.model) + 0.5 * torch.norm(rho, dim=1) ** 2
# print(model.G_inv(z).det())
for k in range(self.n_lf):
g = -self.grad_func(z, self.model).reshape(
n_samples, self.model.latent_dim
)
# step 1
rho_ = rho - (self.eps_lf / 2) * g
# step 2
z = z + self.eps_lf * rho_
g = -self.grad_func(z, self.model).reshape(
n_samples, self.model.latent_dim
)
# g = (Sigma_inv @ (z - mu).T).reshape(n_samples, 2)
# step 3
rho__ = rho_ - (self.eps_lf / 2) * g
# tempering
beta_sqrt = RHVAESampler.tempering(
k + 1, self.n_lf, self.beta_zero_sqrt
)
rho = (beta_sqrt_old / beta_sqrt) * rho__
beta_sqrt_old = beta_sqrt
H = -self.log_pi(z, self.model) + 0.5 * torch.norm(rho, dim=1) ** 2
alpha = torch.exp(-H) / (torch.exp(-H0))
acc = torch.rand(n_samples).to(self.device)
moves = (acc < alpha).type(torch.int).reshape(n_samples, 1)
z = z * moves + (1 - moves) * z0
z0 = z
return z
@staticmethod
def tempering(k, K, beta_zero_sqrt):
beta_k = ((1 - 1 / beta_zero_sqrt) * (k / K) ** 2) + 1 / beta_zero_sqrt
return 1 / beta_k
@staticmethod
def log_sqrt_det_G_inv(z, model):
return torch.log(torch.sqrt(torch.det(model.G_inv(z))) + 1e-10)
@staticmethod
def grad_log_sqrt_det_G_inv(z, model):
return (
-0.5
* torch.transpose(model.G(z), 1, 2)
@ torch.transpose(
(
-2
/ (model.temperature ** 2)
* (model.centroids_tens.unsqueeze(0) - z.unsqueeze(1)).unsqueeze(2)
@ (
model.M_tens.unsqueeze(0)
* torch.exp(
-torch.norm(
model.centroids_tens.unsqueeze(0) - z.unsqueeze(1),
dim=-1,
)
** 2
/ (model.temperature ** 2)
)
.unsqueeze(-1)
.unsqueeze(-1)
)
).sum(dim=1),
1,
2,
)
)
@staticmethod
def grad_log_prop(z, model):
def grad_func(z, model):
return RHVAESampler.grad_log_sqrt_det_G_inv(z, model)
return grad_func(z, model)
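# --- Worked example (added for illustration; the values are assumptions) ---
# The tempering schedule returns 1 / beta_k, which decreases from beta_zero_sqrt at the
# first leapfrog step (k=0) down to 1 at the last step (k=K), gradually removing the
# initial tempering of the momenta. For an assumed beta_zero_sqrt = sqrt(10) and K = 5:
if __name__ == "__main__":
    _bzs = torch.tensor([10.0]).sqrt()
    assert abs(float(RHVAESampler.tempering(0, 5, _bzs)) - float(_bzs)) < 1e-6
    assert abs(float(RHVAESampler.tempering(5, 5, _bzs)) - 1.0) < 1e-6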
| [
"torch.rand",
"torch.cat",
"torch.norm",
"torch.no_grad",
"torch.randn_like",
"torch.tensor",
"torch.exp"
] | 1.8.1 | clementchadebec/pyraug | d1b36c060fe56427ed158ecb38cdbc6cc3bc0f74 |
0.4 | import torch
import torch.nn as nn
from . import resnet, resnext, mobilenet, hrnet
from mit_semseg.lib.nn import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
def pixel_acc(self, pred, label):
_, preds = torch.max(pred, dim=1)
valid = (label >= 0).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
return acc
class SegmentationModule(SegmentationModuleBase):
def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
super(SegmentationModule, self).__init__()
self.encoder = net_enc
self.decoder = net_dec
self.crit = crit
self.deep_sup_scale = deep_sup_scale
def forward(self, feed_dict, *, segSize=None):
# training
if segSize is None:
if self.deep_sup_scale is not None: # use deep supervision technique
(pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
else:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
loss = self.crit(pred, feed_dict['seg_label'])
if self.deep_sup_scale is not None:
loss_deepsup = self.crit(pred_deepsup, feed_dict['seg_label'])
loss = loss + loss_deepsup * self.deep_sup_scale
acc = self.pixel_acc(pred, feed_dict['seg_label'])
return loss, acc
# inference
else:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), segSize=segSize)
return pred
class ModelBuilder:
# custom weights initialization
@staticmethod
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
#elif classname.find('Linear') != -1:
# m.weight.data.normal_(0.0, 0.0001)
@staticmethod
def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''):
pretrained = True if len(weights) == 0 else False
arch = arch.lower()
if arch == 'mobilenetv2dilated':
orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained)
net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8)
elif arch == 'resnet18':
orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet18dilated':
orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
elif arch == 'resnet34':
raise NotImplementedError
orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet34dilated':
raise NotImplementedError
orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
elif arch == 'resnet50':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet50dilated':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
elif arch == 'resnet101':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet101dilated':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
elif arch == 'resnext101':
orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnext) # we can still use class Resnet
elif arch == 'hrnetv2':
net_encoder = hrnet.__dict__['hrnetv2'](pretrained=pretrained)
else:
raise Exception('Architecture undefined!')
# encoders are usually pretrained
# net_encoder.apply(ModelBuilder.weights_init)
if len(weights) > 0:
print('Loading weights for net_encoder')
net_encoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_encoder
@staticmethod
def build_decoder(arch='ppm_deepsup',
fc_dim=512, num_class=150,
weights='', use_softmax=False):
arch = arch.lower()
if arch == 'c1_deepsup':
net_decoder = C1DeepSup(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1':
net_decoder = C1(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'ppm':
net_decoder = PPM(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'ppm_deepsup':
net_decoder = PPMDeepsup(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'upernet_lite':
net_decoder = UPerNet(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=256)
elif arch == 'upernet':
net_decoder = UPerNet(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=512)
else:
raise Exception('Architecture undefined!')
net_decoder.apply(ModelBuilder.weights_init)
if len(weights) > 0:
print('Loading weights for net_decoder')
net_decoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_decoder
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
"3x3 convolution + BN + relu"
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=1, bias=False),
BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
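# --- Usage sketch (added for illustration; not part of the original file) ---
# conv3x3_bn_relu is the basic block reused by the decoders below: a 3x3 convolution
# that keeps the spatial size (padding=1), synchronized batch norm, then ReLU.
if __name__ == "__main__":
    _block = conv3x3_bn_relu(in_planes=64, out_planes=128, stride=1)
    _feat = _block(torch.rand(2, 64, 32, 32))
    assert _feat.shape == (2, 128, 32, 32)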
class Resnet(nn.Module):
def __init__(self, orig_resnet):
super(Resnet, self).__init__()
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
class ResnetDilated(nn.Module):
def __init__(self, orig_resnet, dilate_scale=8):
super(ResnetDilated, self).__init__()
from functools import partial
if dilate_scale == 8:
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=4))
elif dilate_scale == 16:
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=2))
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
            # other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
class MobileNetV2Dilated(nn.Module):
def __init__(self, orig_net, dilate_scale=8):
super(MobileNetV2Dilated, self).__init__()
from functools import partial
# take pretrained mobilenet features
self.features = orig_net.features[:-1]
self.total_idx = len(self.features)
self.down_idx = [2, 4, 7, 14]
if dilate_scale == 8:
for i in range(self.down_idx[-2], self.down_idx[-1]):
self.features[i].apply(
partial(self._nostride_dilate, dilate=2)
)
for i in range(self.down_idx[-1], self.total_idx):
self.features[i].apply(
partial(self._nostride_dilate, dilate=4)
)
elif dilate_scale == 16:
for i in range(self.down_idx[-1], self.total_idx):
self.features[i].apply(
partial(self._nostride_dilate, dilate=2)
)
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
# other convoluions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, return_feature_maps=False):
if return_feature_maps:
conv_out = []
for i in range(self.total_idx):
x = self.features[i](x)
if i in self.down_idx:
conv_out.append(x)
conv_out.append(x)
return conv_out
else:
return [self.features(x)]
# last conv, deep supervision
class C1DeepSup(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1DeepSup, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
# deep sup
conv4 = conv_out[-2]
_ = self.cbr_deepsup(conv4)
_ = self.conv_last_deepsup(_)
x = nn.functional.log_softmax(x, dim=1)
_ = nn.functional.log_softmax(_, dim=1)
return (x, _)
# last conv
class C1(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.log_softmax(x, dim=1)
return x
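# Illustrative sketch, not part of the original file: the C1 head maps the last backbone
# feature map to per-pixel class scores. The tensor shapes below are assumptions.
def _demo_c1_forward():
    head = C1(num_class=150, fc_dim=2048, use_softmax=True).eval()
    out = head([torch.randn(1, 2048, 16, 16)], segSize=(64, 64))
    return out.shape  # expected: torch.Size([1, 150, 64, 64])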
# pyramid pooling
class PPM(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6)):
super(PPM, self).__init__()
self.use_softmax = use_softmax
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(
nn.AdaptiveAvgPool2d(scale),
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm = nn.ModuleList(self.ppm)
self.conv_last = nn.Sequential(
nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
kernel_size=3, padding=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(512, num_class, kernel_size=1)
)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale in self.ppm:
ppm_out.append(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False))
ppm_out = torch.cat(ppm_out, 1)
x = self.conv_last(ppm_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.log_softmax(x, dim=1)
return x
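# Illustrative sketch, not part of the original file: each PPM branch pools conv5 to
# `scale` x `scale`, projects it to 512 channels, and upsamples it back before concatenation,
# so conv_last sees fc_dim + len(pool_scales) * 512 input channels. Shapes here are assumptions.
def _demo_ppm_forward():
    head = PPM(num_class=150, fc_dim=2048, use_softmax=True).eval()
    out = head([torch.randn(1, 2048, 16, 16)], segSize=(128, 128))
    return out.shape  # expected: torch.Size([1, 150, 128, 128])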
# pyramid pooling, deep supervision
class PPMDeepsup(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6)):
super(PPMDeepsup, self).__init__()
self.use_softmax = use_softmax
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(
nn.AdaptiveAvgPool2d(scale),
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm = nn.ModuleList(self.ppm)
self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
self.conv_last = nn.Sequential(
nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
kernel_size=3, padding=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(512, num_class, kernel_size=1)
)
self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
self.dropout_deepsup = nn.Dropout2d(0.1)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale in self.ppm:
ppm_out.append(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False))
ppm_out = torch.cat(ppm_out, 1)
x = self.conv_last(ppm_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
# deep sup
conv4 = conv_out[-2]
_ = self.cbr_deepsup(conv4)
_ = self.dropout_deepsup(_)
_ = self.conv_last_deepsup(_)
x = nn.functional.log_softmax(x, dim=1)
_ = nn.functional.log_softmax(_, dim=1)
return (x, _)
# upernet
class UPerNet(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6),
fpn_inplanes=(256, 512, 1024, 2048), fpn_dim=256):
super(UPerNet, self).__init__()
self.use_softmax = use_softmax
# PPM Module
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
# FPN Module
self.fpn_in = []
for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
self.fpn_in.append(nn.Sequential(
nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
BatchNorm2d(fpn_dim),
nn.ReLU(inplace=True)
))
self.fpn_in = nn.ModuleList(self.fpn_in)
self.fpn_out = []
for i in range(len(fpn_inplanes) - 1): # skip the top layer
self.fpn_out.append(nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
))
self.fpn_out = nn.ModuleList(self.fpn_out)
self.conv_last = nn.Sequential(
conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, num_class, kernel_size=1)
)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
fpn_feature_list = [f]
for i in reversed(range(len(conv_out) - 1)):
conv_x = conv_out[i]
conv_x = self.fpn_in[i](conv_x) # lateral branch
f = nn.functional.interpolate(
f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
f = conv_x + f
fpn_feature_list.append(self.fpn_out[i](f))
fpn_feature_list.reverse() # [P2 - P5]
output_size = fpn_feature_list[0].size()[2:]
fusion_list = [fpn_feature_list[0]]
for i in range(1, len(fpn_feature_list)):
fusion_list.append(nn.functional.interpolate(
fpn_feature_list[i],
output_size,
mode='bilinear', align_corners=False))
fusion_out = torch.cat(fusion_list, 1)
x = self.conv_last(fusion_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
x = nn.functional.log_softmax(x, dim=1)
return x
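# Illustrative sketch, not part of the original file: UPerNet expects the full list of
# backbone feature maps (here matching the default fpn_inplanes channel counts); the channel
# sizes and spatial resolutions below are assumptions for a 256x256 output.
def _demo_upernet_forward():
    head = UPerNet(num_class=150, fc_dim=2048, use_softmax=True).eval()
    conv_out = [torch.randn(1, c, s, s) for c, s in zip((256, 512, 1024, 2048), (64, 32, 16, 8))]
    out = head(conv_out, segSize=(256, 256))
    return out.shape  # expected: torch.Size([1, 150, 256, 256])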
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.max",
"torch.nn.functional.interpolate",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.log_softmax",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load",
"torch.nn.functional.softmax",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Dropout2d",
"torch.sum"
] | 0.4.1 | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 |
0.4 | """
This HRNet implementation is modified from the following repository:
https://github.com/HRNet/HRNet-Semantic-Segmentation
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_url
from mit_semseg.lib.nn import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
__all__ = ['hrnetv2']
model_urls = {
'hrnetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/hrnetv2_w48-imagenet.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, multi_scale_output=True):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels)
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=True)
def _check_branches(self, num_branches, blocks, num_blocks,
num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(num_inchannels))
logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(num_channels[branch_index] * block.expansion,
momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], stride, downsample))
self.num_inchannels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_inchannels[i],
1,
1,
0,
bias=False),
BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i-j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
BatchNorm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM)))
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
BatchNorm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
elif j > i:
width_output = x[i].shape[-1]
height_output = x[i].shape[-2]
y = y + F.interpolate(
self.fuse_layers[i][j](x[j]),
size=(height_output, width_output),
mode='bilinear',
align_corners=False)
else:
y = y + self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HRNetV2(nn.Module):
def __init__(self, n_class, **kwargs):
super(HRNetV2, self).__init__()
extra = {
'STAGE2': {'NUM_MODULES': 1, 'NUM_BRANCHES': 2, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4), 'NUM_CHANNELS': (48, 96), 'FUSE_METHOD': 'SUM'},
'STAGE3': {'NUM_MODULES': 4, 'NUM_BRANCHES': 3, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4, 4), 'NUM_CHANNELS': (48, 96, 192), 'FUSE_METHOD': 'SUM'},
'STAGE4': {'NUM_MODULES': 3, 'NUM_BRANCHES': 4, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4, 4, 4), 'NUM_CHANNELS': (48, 96, 192, 384), 'FUSE_METHOD': 'SUM'},
'FINAL_CONV_KERNEL': 1
}
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([256], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
BatchNorm2d(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i+1-num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i-num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used in the last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(
num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x, return_feature_maps=False):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(
x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=False)
x2 = F.interpolate(
x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=False)
x3 = F.interpolate(
x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=False)
x = torch.cat([x[0], x1, x2, x3], 1)
# x = self.last_layer(x)
return [x]
def hrnetv2(pretrained=False, **kwargs):
model = HRNetV2(n_class=1000, **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['hrnetv2']), strict=False)
return model
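# Illustrative usage sketch, not part of the original file (assumes the mit_semseg imports
# above resolve; no pretrained weights are needed):
def _demo_hrnetv2_features():
    encoder = hrnetv2(pretrained=False).eval()
    feats = encoder(torch.randn(1, 3, 224, 224), return_feature_maps=True)
    # The four branch outputs are upsampled to 1/4 input resolution and concatenated,
    # giving 48 + 96 + 192 + 384 = 720 channels.
    return feats[0].shape  # expected: torch.Size([1, 720, 56, 56])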
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 0.4.1 | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 |
0.4 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
try:
from torch._C import _set_worker_pids
except ImportError:
from torch._C import _update_worker_pids as _set_worker_pids
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
class ExceptionWrapper(object):
r"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
_use_shared_memory = False
"""Whether to use shared memory in default_collate"""
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
global _use_shared_memory
_use_shared_memory = True
    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal happened again already.
# https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
_set_worker_signal_handlers()
torch.set_num_threads(1)
torch.manual_seed(seed)
np.random.seed(seed)
if init_fn is not None:
init_fn(worker_id)
while True:
r = index_queue.get()
if r is None:
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
if pin_memory:
torch.cuda.set_device(device_id)
while True:
try:
r = in_queue.get()
except Exception:
if done_event.is_set():
return
raise
if r is None:
break
if isinstance(r[1], ExceptionWrapper):
out_queue.put(r)
continue
idx, batch = r
try:
if pin_memory:
batch = pin_memory_batch(batch)
except Exception:
out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
out_queue.put((idx, batch))
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def default_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
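# Illustrative sketch, not part of the original file: dicts of tensors/ints are collated
# field-wise into batched tensors by the recursion above. The sample fields are assumptions.
def _demo_default_collate():
    samples = [{'x': torch.ones(3), 'y': 1}, {'x': torch.zeros(3), 'y': 2}]
    batch = default_collate(samples)
    return batch['x'].shape, batch['y']  # expected: torch.Size([2, 3]), tensor([1, 2])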
def pin_memory_batch(batch):
if torch.is_tensor(batch):
return batch.pin_memory()
elif isinstance(batch, string_classes):
return batch
elif isinstance(batch, collections.Mapping):
return {k: pin_memory_batch(sample) for k, sample in batch.items()}
elif isinstance(batch, collections.Sequence):
return [pin_memory_batch(sample) for sample in batch]
else:
return batch
_SIGCHLD_handler_set = False
"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
# Windows doesn't support SIGCHLD handler
if sys.platform == 'win32':
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread):
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
previous_handler = None
def handler(signum, frame):
        # The following call uses `waitid` with WNOHANG from the C side. Therefore,
# Python can still get and update the process status successfully.
_error_if_any_worker_fails()
if previous_handler is not None:
previous_handler(signum, frame)
signal.signal(signal.SIGCHLD, handler)
_SIGCHLD_handler_set = True
class DataLoaderIter(object):
"Iterates once over the DataLoader's dataset, as specified by the sampler"
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.done_event = threading.Event()
self.sample_iter = iter(self.batch_sampler)
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.index_queue = multiprocessing.SimpleQueue()
self.worker_result_queue = multiprocessing.SimpleQueue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0]
self.workers = [
multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn,
base_seed + i, self.worker_init_fn, i))
for i in range(self.num_workers)]
if self.pin_memory or self.timeout > 0:
self.data_queue = queue.Queue()
if self.pin_memory:
maybe_device_id = torch.cuda.current_device()
else:
# do not initialize cuda context if not necessary
maybe_device_id = None
self.worker_manager_thread = threading.Thread(
target=_worker_manager_loop,
args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory,
maybe_device_id))
self.worker_manager_thread.daemon = True
self.worker_manager_thread.start()
else:
self.data_queue = self.worker_result_queue
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
_set_worker_pids(id(self), tuple(w.pid for w in self.workers))
_set_SIGCHLD_handler()
self.worker_pids_set = True
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def _get_batch(self):
if self.timeout > 0:
try:
return self.data_queue.get(timeout=self.timeout)
except queue.Empty:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
else:
return self.data_queue.get()
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self._get_batch()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queue.put((self.send_idx, indices))
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, ExceptionWrapper):
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("DataLoaderIterator cannot be pickled")
def _shutdown_workers(self):
try:
if not self.shutdown:
self.shutdown = True
self.done_event.set()
# if worker_manager_thread is waiting to put
while not self.data_queue.empty():
self.data_queue.get()
for _ in self.workers:
self.index_queue.put(None)
# done_event should be sufficient to exit worker_manager_thread,
# but be safe here and put another None
self.worker_result_queue.put(None)
finally:
# removes pids no matter what
if self.worker_pids_set:
_remove_worker_pids(id(self))
self.worker_pids_set = False
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class DataLoader(object):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: 0)
worker_init_fn (callable, optional): If not None, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: None)
.. note:: By default, each worker will have its PyTorch seed set to
``base_seed + worker_id``, where ``base_seed`` is a long generated
by main process using its RNG. You may use ``torch.initial_seed()`` to access
this value in :attr:`worker_init_fn`, which can be used to set other seeds
(e.g. NumPy) before data loading.
    .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
unpicklable object, e.g., a lambda function.
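    Example (minimal sketch; ``dataset`` stands for any map-style dataset)::
        >>> loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
        >>> for batch in loader:
        ...     pass  # each ``batch`` is produced by ``collate_fn``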
"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None):
self.dataset = dataset
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.drop_last = drop_last
self.timeout = timeout
self.worker_init_fn = worker_init_fn
if timeout < 0:
raise ValueError('timeout option should be non-negative')
if batch_sampler is not None:
if batch_size > 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler is mutually exclusive with '
'batch_size, shuffle, sampler, and drop_last')
if sampler is not None and shuffle:
raise ValueError('sampler is mutually exclusive with shuffle')
if self.num_workers < 0:
raise ValueError('num_workers cannot be negative; '
'use num_workers=0 to disable multiprocessing.')
if batch_sampler is None:
if sampler is None:
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
def __iter__(self):
return DataLoaderIter(self)
def __len__(self):
return len(self.batch_sampler)
| [
"torch.multiprocessing.Process",
"torch.stack",
"torch.is_tensor",
"torch.cuda.current_device",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.DoubleTensor",
"torch.cuda.is_available",
"torch._C._set_worker_signal_handlers",
"torch.LongTensor",
"torch.from_numpy",
"torch.multiprocessing.SimpleQueue",
"torch._C._error_if_any_worker_fails",
"torch.set_num_threads"
] | 0.4.1 | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 |
1.4 | """ Twins
A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`
- https://arxiv.org/pdf/2104.13840.pdf
Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below
"""
# --------------------------------------------------------
# Twins
# Copyright (c) 2021 Meituan
# Licensed under The Apache 2.0 License [see LICENSE for details]
# Written by Xinjie Li, Xiangxiang Chu
# --------------------------------------------------------
import math
from copy import deepcopy
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .layers import Mlp, DropPath, to_2tuple, trunc_normal_
from .registry import register_model
from .vision_transformer import Attention
from .helpers import build_model_with_cfg, overlay_external_default_cfg
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'twins_pcpvt_small': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth',
),
'twins_pcpvt_base': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth',
),
'twins_pcpvt_large': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth',
),
'twins_svt_small': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth',
),
'twins_svt_base': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth',
),
'twins_svt_large': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth',
),
}
Size_ = Tuple[int, int]
class LocallyGroupedAttn(nn.Module):
""" LSA: self attention within a group
"""
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
assert ws != 1
super(LocallyGroupedAttn, self).__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.ws = ws
def forward(self, x, size: Size_):
        # There are two implementations of this function: zero padding or masking. We don't observe an obvious
        # difference between them, so either can be used; we use the padding-based version here because it's neat.
        # However, the masking implementation (see the commented-out forward_mask below) is more reasonable and accurate.
B, N, C = x.shape
H, W = size
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.ws - W % self.ws) % self.ws
pad_b = (self.ws - H % self.ws) % self.ws
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
_h, _w = Hp // self.ws, Wp // self.ws
x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
qkv = self.qkv(x).reshape(
B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# def forward_mask(self, x, size: Size_):
# B, N, C = x.shape
# H, W = size
# x = x.view(B, H, W, C)
# pad_l = pad_t = 0
# pad_r = (self.ws - W % self.ws) % self.ws
# pad_b = (self.ws - H % self.ws) % self.ws
# x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
# _, Hp, Wp, _ = x.shape
# _h, _w = Hp // self.ws, Wp // self.ws
# mask = torch.zeros((1, Hp, Wp), device=x.device)
# mask[:, -pad_b:, :].fill_(1)
# mask[:, :, -pad_r:].fill_(1)
#
# x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C
# mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
# attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws
# attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
# qkv = self.qkv(x).reshape(
# B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
# # n_h, B, _w*_h, nhead, ws*ws, dim
# q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head
# attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws
# attn = attn + attn_mask.unsqueeze(2)
# attn = attn.softmax(dim=-1)
# attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
# attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
# x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
# if pad_r > 0 or pad_b > 0:
# x = x[:, :H, :W, :].contiguous()
# x = x.reshape(B, N, C)
# x = self.proj(x)
# x = self.proj_drop(x)
# return x
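# Illustrative sketch, not part of the original file: the window partition used in
# LocallyGroupedAttn reshapes (B, H, W, C) into (B, _h, _w, ws, ws, C) and back without
# losing elements. The tensor sizes below are assumptions.
def _demo_window_partition(ws=2):
    x = torch.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3).float()
    B, H, W, C = x.shape
    _h, _w = H // ws, W // ws
    windows = x.reshape(B, _h, ws, _w, ws, C).transpose(2, 3)  # B, _h, _w, ws, ws, C
    restored = windows.transpose(2, 3).reshape(B, H, W, C)
    return torch.equal(x, restored)  # expected: True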
class GlobalSubSampleAttn(nn.Module):
""" GSA: using a key to summarize the information for a group to be efficient.
"""
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=True)
self.kv = nn.Linear(dim, dim * 2, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.sr = None
self.norm = None
def forward(self, x, size: Size_):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr is not None:
x = x.permute(0, 2, 1).reshape(B, C, *size)
x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None):
super().__init__()
self.norm1 = norm_layer(dim)
if ws is None:
self.attn = Attention(dim, num_heads, False, None, attn_drop, drop)
elif ws == 1:
self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio)
else:
self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, size: Size_):
x = x + self.drop_path(self.attn(self.norm1(x), size))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PosConv(nn.Module):
# PEG from https://arxiv.org/abs/2102.10882
def __init__(self, in_chans, embed_dim=768, stride=1):
super(PosConv, self).__init__()
self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), )
self.stride = stride
def forward(self, x, size: Size_):
B, N, C = x.shape
cnn_feat_token = x.transpose(1, 2).view(B, C, *size)
x = self.proj(cnn_feat_token)
if self.stride == 1:
x += cnn_feat_token
x = x.flatten(2).transpose(1, 2)
return x
def no_weight_decay(self):
return ['proj.%d.weight' % i for i in range(4)]
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x) -> Tuple[torch.Tensor, Size_]:
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
out_size = (H // self.patch_size[0], W // self.patch_size[1])
return x, out_size
class Twins(nn.Module):
""" Twins Vision Transfomer (Revisiting Spatial Attention)
Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
"""
def __init__(
self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512),
num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None,
block_cls=Block):
super().__init__()
self.num_classes = num_classes
self.depths = depths
img_size = to_2tuple(img_size)
prev_chs = in_chans
self.patch_embeds = nn.ModuleList()
self.pos_drops = nn.ModuleList()
for i in range(len(depths)):
self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
self.pos_drops.append(nn.Dropout(p=drop_rate))
prev_chs = embed_dims[i]
img_size = tuple(t // patch_size for t in img_size)
patch_size = 2
self.blocks = nn.ModuleList()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
for k in range(len(depths)):
_block = nn.ModuleList([block_cls(
dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k],
ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])])
self.blocks.append(_block)
cur += depths[k]
self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
self.norm = norm_layer(embed_dims[-1])
# classification head
        self.embed_dim = embed_dims[-1]
        self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
# init weights
self.apply(self._init_weights)
@torch.jit.ignore
def no_weight_decay(self):
return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()])
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
def forward_features(self, x):
B = x.shape[0]
for i, (embed, drop, blocks, pos_blk) in enumerate(
zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):
x, size = embed(x)
x = drop(x)
for j, blk in enumerate(blocks):
x = blk(x, size)
if j == 0:
x = pos_blk(x, size) # PEG here
if i < len(self.depths) - 1:
x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
x = self.norm(x)
return x.mean(dim=1) # GAP here
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _create_twins(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
Twins, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
return model
@register_model
def twins_pcpvt_small(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs)
@register_model
def twins_pcpvt_base(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs)
@register_model
def twins_pcpvt_large(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs)
@register_model
def twins_svt_small(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs)
@register_model
def twins_svt_base(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs)
@register_model
def twins_svt_large(pretrained=False, **kwargs):
model_kwargs = dict(
patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)
return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs)
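# Illustrative usage sketch, not part of the original file (assumes the timm imports above
# resolve; no pretrained weights are needed):
def _demo_twins_classifier():
    model = twins_pcpvt_small(pretrained=False).eval()
    logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # expected: torch.Size([1, 1000])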
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.pad"
] | 1.4.0 | visualCalculus/pytorch-image-models | 54a6cca27a9a3e092a07457f5d56709da56e3cf5 |
0.2 | import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import os
from omegaconf import DictConfig
from src.utils.technical_utils import load_obj
class VentilatorRegression(pl.LightningModule):
def __init__(self, cfg: DictConfig):
super(VentilatorRegression, self).__init__()
self.cfg = cfg
self.model = load_obj(cfg.model.class_name)(**self.cfg.model.params)
print(self.model)
if 'params' in self.cfg.loss:
self.loss = load_obj(cfg.loss.class_name)(**self.cfg.loss.params)
else:
self.loss = load_obj(cfg.loss.class_name)()
self.metrics = torch.nn.ModuleDict(
{
self.cfg.metric.metric.metric_name: load_obj(self.cfg.metric.metric.class_name)()
}
)
if 'other_metrics' in self.cfg.metric.keys():
for metric in self.cfg.metric.other_metrics:
self.metrics.update({metric.metric_name: load_obj(metric.class_name)()})
print(f'{self.metrics=}')
self.best_ventilator_mae = torch.tensor(1000)
if self.cfg.training.pp_for_loss:
train = pd.read_csv(os.path.join(self.cfg.datamodule.path, 'train.csv'))
all_pressure = sorted(train['pressure'].unique())
self.pressure_min = torch.tensor(all_pressure[0], device=self.device)
self.pressure_max = torch.tensor(all_pressure[-1], device=self.device)
self.pressure_step = all_pressure[1] - all_pressure[0]
def forward(self, x, *args, **kwargs):
return self.model(x)
def configure_optimizers(self):
if 'decoder_lr' in self.cfg.optimizer.params.keys():
params = [
{'params': self.model.decoder.parameters(), 'lr': self.cfg.optimizer.params.lr},
{'params': self.model.encoder.parameters(), 'lr': self.cfg.optimizer.params.decoder_lr},
]
optimizer = load_obj(self.cfg.optimizer.class_name)(params)
else:
optimizer = load_obj(self.cfg.optimizer.class_name)(self.model.parameters(), **self.cfg.optimizer.params)
scheduler = load_obj(self.cfg.scheduler.class_name)(optimizer, **self.cfg.scheduler.params)
return (
[optimizer],
[{'scheduler': scheduler, 'interval': self.cfg.scheduler.step, 'monitor': self.cfg.scheduler.monitor,
'name': self.cfg.scheduler.class_name}],
)
def training_step(self, batch, *args, **kwargs): # type: ignore
# data = batch['input']
# pred = self(data).squeeze(-1)
pred, pred1, pred2 = self(batch)
pred, pred1, pred2 = pred.squeeze(-1), pred1.squeeze(-1), pred2.squeeze(-1)
if self.cfg.loss.class_name == 'torch.nn.L1Loss' or self.cfg.loss.class_name == 'torch.nn.HuberLoss':
loss = (self.loss(pred, batch['p']).mean() + self.loss(pred1, batch['p1']).mean() + self.loss(pred2, batch['p2']).mean()) / 3
else:
loss = (self.loss(pred, batch['p'], batch['u_out']).mean() + self.loss(pred1, batch['p1'], batch['u_out']).mean() + self.loss(pred2, batch['p2'], batch['u_out']).mean()) / 3
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=False, logger=True)
for metric in self.metrics:
if metric == 'mae':
score = self.metrics[metric](pred, batch['p']).mean()
else:
score = self.metrics[metric](pred, batch['p'], batch['u_out']).mean()
self.log(f'train_{metric}', score, on_step=True, on_epoch=True, prog_bar=False, logger=True)
return loss
def validation_step(self, batch, *args, **kwargs): # type: ignore
# data = batch['input']
# pred = self(data).squeeze(-1)
pred, pred1, pred2 = self(batch)
pred, pred1, pred2 = pred.squeeze(-1), pred1.squeeze(-1), pred2.squeeze(-1)
if self.cfg.loss.class_name == 'torch.nn.L1Loss' or self.cfg.loss.class_name == 'torch.nn.HuberLoss':
loss = (self.loss(pred, batch['p']).mean() + self.loss(pred1, batch['p1']).mean() + self.loss(pred2, batch['p2']).mean()) / 3
else:
loss = (self.loss(pred, batch['p'], batch['u_out']).mean() + self.loss(pred1, batch['p1'], batch['u_out']).mean() + self.loss(pred2, batch['p2'], batch['u_out']).mean()) / 3
self.log('valid_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
for metric in self.metrics:
if metric == 'mae':
score = self.metrics[metric](pred, batch['p']).mean()
else:
score = self.metrics[metric](pred, batch['p'], batch['u_out']).mean()
if metric == 'ventilator_mae':
self.best_ventilator_mae = score if score < self.best_ventilator_mae else self.best_ventilator_mae
self.log('best_ventilator_mae', self.best_ventilator_mae, on_step=False, on_epoch=True, prog_bar=True,
logger=True)
self.log(f'valid_{metric}', score, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return loss
def predict_step(self, batch, *args, **kwargs): # type: ignore
# data = batch['input']
return self(batch)[0].squeeze(-1)
# def training_epoch_end(self, outputs):
# avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
# y_true = torch.cat([x['target'] for x in outputs])
# y_pred = torch.cat([x['logits'] for x in outputs])
# score = self.metric(y_pred.argmax(1), y_true)
#
# # score = torch.tensor(1.0, device=self.device)
#
# logs = {'train_loss': avg_loss, f'train_{self.cfg.training.metric}': score}
# return {'log': logs, 'progress_bar': logs} | [
"torch.tensor"
] | 0.2.1 | Erlemar/ventilator_kaggle_models | 216e5fcfde28cd20773d0ccf996fff3ff1775921 |
1.5 | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import Dict, Tuple, Sequence, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fuse.utils.utils_hierarchical_dict import FuseUtilsHierarchicalDict
from fuse.models.heads.common import ClassifierMLP
class FuseHead3dClassifier(nn.Module):
"""
    Model that captures slice features, including the 3D context, given the local features of a slice.
"""
def __init__(self, head_name: str = 'head_0',
conv_inputs: Sequence[Tuple[str, int]] = (('model.backbone_features', 512),),
dropout_rate: float = 0.1,
num_classes: int = 3,
append_features: Optional[Tuple[str, int]] = None,
layers_description: Sequence[int] = (256,),
append_layers_description: Sequence[int] = tuple(),
append_dropout_rate: float = 0.0,
fused_dropout_rate: float = 0.0,
) -> None:
"""
Create simple 3D context model
:param head_name: string representing the head name
        :param conv_inputs: Sequence of tuples, each indicating a feature name in batch_dict and the size of the features (channels)
:param dropout_rate: dropout fraction
:param num_classes: number of output classes
        :param append_features: Sequence of tuples, each indicating a feature name in batch_dict and the size of the features (channels).
                                These are global features that are appended after the global max pooling operation
:param layers_description: Layers description for the classifier module - sequence of hidden layers sizes (Not used currently)
        :param append_layers_description: Layers description for the tabular data, before the concatenation with the features extracted from the image - sequence of hidden layer sizes
:param append_dropout_rate: Dropout rate for tabular layers
"""
super().__init__()
# save input params
self.head_name = head_name
self.conv_inputs = conv_inputs
self.dropout_rate = dropout_rate
self.num_classes = num_classes
self.append_features = append_features
self.gmp = nn.AdaptiveMaxPool3d(output_size=1)
self.features_size = sum([features[1] for features in self.conv_inputs]) if self.conv_inputs is not None else 0
# calc appended feature size if used
if self.append_features is not None:
if len(append_layers_description) == 0:
self.features_size += sum([post_concat_input[1] for post_concat_input in append_features])
self.append_features_module = nn.Identity()
else:
self.features_size += append_layers_description[-1]
self.append_features_module = ClassifierMLP(in_ch=sum([post_concat_input[1] for post_concat_input in append_features]),
num_classes=None,
layers_description=append_layers_description,
dropout_rate=append_dropout_rate)
self.conv_classifier_3d = nn.Sequential(
nn.Conv3d(self.features_size, 256, kernel_size=1),
nn.ReLU(),
nn.Dropout3d(p=fused_dropout_rate),
nn.Conv3d(256, self.num_classes, kernel_size=1),
)
self.do = nn.Dropout3d(p=self.dropout_rate)
def forward(self, batch_dict: Dict) -> Dict:
"""
Forward pass
:param batch_dict: dictionary containing an input tensor representing spatial features with 3D context. shape: [batch_size, in_features, z, y, x]
:return: batch dict with fields model.outputs and model.logits
"""
if self.conv_inputs is not None:
conv_input = torch.cat(
[FuseUtilsHierarchicalDict.get(batch_dict, conv_input[0]) for conv_input in self.conv_inputs], dim=1)
global_features = self.gmp(conv_input)
# save global max pooling features in case needed (mostly to analyze)
FuseUtilsHierarchicalDict.set(batch_dict, 'model.' + self.head_name +'.gmp_features', global_features.squeeze(dim=4).squeeze(dim=3).squeeze(dim=2))
# backward compatibility
if hasattr(self, 'do'):
global_features = self.do(global_features)
# append global features if are used
if self.append_features is not None:
features = torch.cat(
[FuseUtilsHierarchicalDict.get(batch_dict, features[0]).reshape(-1, features[1]) for features in self.append_features], dim=1)
features = self.append_features_module(features)
features = features.reshape(features.shape + (1,1,1))
if self.conv_inputs is not None:
global_features = torch.cat((global_features, features), dim=1)
else:
global_features = features
logits = self.conv_classifier_3d(global_features)
logits = logits.squeeze(dim=4)
logits = logits.squeeze(dim=3)
logits = logits.squeeze(dim=2) # squeeze will change the shape to [batch_size, channels']
cls_preds = F.softmax(logits, dim=1)
FuseUtilsHierarchicalDict.set(batch_dict, 'model.logits.' + self.head_name, logits)
FuseUtilsHierarchicalDict.set(batch_dict, 'model.output.' + self.head_name, cls_preds)
return batch_dict
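# Illustrative sketch, not part of the original file: the pooling/classification path used in
# the head above, written standalone. The feature-map shape is an assumption.
def _demo_gmp_classifier_shapes():
    gmp = nn.AdaptiveMaxPool3d(output_size=1)
    feats = torch.rand(2, 512, 4, 16, 16)                  # [batch, channels, z, y, x]
    pooled = gmp(feats)                                    # [2, 512, 1, 1, 1]
    logits = nn.Conv3d(512, 3, kernel_size=1)(pooled)      # [2, 3, 1, 1, 1]
    return logits.squeeze(4).squeeze(3).squeeze(2).shape   # expected: torch.Size([2, 3])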
| [
"torch.nn.Identity",
"torch.cat",
"torch.nn.ReLU",
"torch.nn.Dropout3d",
"torch.nn.Conv3d",
"torch.nn.functional.softmax",
"torch.nn.AdaptiveMaxPool3d"
] | 1.5.0 | afoncubierta/fuse-med-ml | 2c502c018635f138f00e017f243fd73154abdec2 |
1.5 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
from tqdm.auto import tqdm, trange
from seqeval.metrics import f1_score, precision_score, recall_score
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler
from transformers import AutoConfig, AutoTokenizer
from transformers.data.data_collator import default_data_collator
from transformers import set_seed
from .trainer import IETrainer as Trainer
from chemrxnextractor.models import BertForRoleLabeling, BertCRFForRoleLabeling
from chemrxnextractor.data import RoleDataset, PlainRoleDataset
from chemrxnextractor.data.utils import get_labels
from chemrxnextractor.constants import PROD_START_MARKER, PROD_END_MARKER
from chemrxnextractor.data.role import write_predictions
from chemrxnextractor.utils import create_logger
logger = logging.getLogger(__name__)
SPECIAL_TOKENS = [PROD_START_MARKER, PROD_END_MARKER]
def train(model_args, data_args, train_args):
if (
os.path.exists(train_args.output_dir)
and os.listdir(train_args.output_dir)
and train_args.do_train
and not train_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({train_args.output_dir}) already exists and is not empty."
" Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if train_args.local_rank in [-1, 0] else logging.WARN,
)
# logger = create_logger(name="train_role", save_dir=train_args.output_dir)
logger.info("Training/evaluation parameters %s", train_args)
# Set seed
set_seed(train_args.seed)
labels = get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
additional_special_tokens=SPECIAL_TOKENS
)
if model_args.use_crf:
model = BertCRFForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
tagging_schema="BIO",
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler
)
else:
model = BertForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler
)
model.resize_token_embeddings(len(tokenizer))
# Get datasets
train_dataset = (
RoleDataset(
data_file=os.path.join(data_args.data_dir, "train.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
if train_args.do_train
else None
)
eval_dataset = (
RoleDataset(
data_file=os.path.join(data_args.data_dir, "dev.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
if train_args.do_eval
else None
)
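# entity-level precision/recall/F1 via seqeval, after mapping label ids back to tag strings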
def compute_metrics(predictions, label_ids) -> Dict:
label_list = [[label_map[x] for x in seq] for seq in label_ids]
preds_list = [[label_map[x] for x in seq] for seq in predictions]
return {
"precision": precision_score(label_list, preds_list),
"recall": recall_score(label_list, preds_list),
"f1": f1_score(label_list, preds_list),
}
metrics_fn = compute_metrics
# Initialize our Trainer
trainer = Trainer(
model=model,
args=train_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=metrics_fn,
use_crf=model_args.use_crf
)
# Training
if train_args.do_train:
trainer.train()
# Pass model_path to train() if continuing training from an existing checkpoint.
# trainer.train(
# model_path=model_args.model_name_or_path
# if os.path.isdir(model_args.model_name_or_path)
# else None
# )
trainer.save_model()
tokenizer.save_pretrained(train_args.output_dir)
# Evaluation
if train_args.do_eval:
logger.info("*** Evaluate ***")
output = trainer.evaluate()
predictions = output['predictions']
label_ids = output['label_ids']
metrics = output["metrics"]
output_eval_file = os.path.join(train_args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
preds_list = [[label_map[x] for x in seq] for seq in predictions]
# Save predictions
write_predictions(
os.path.join(data_args.data_dir, "dev.txt"),
os.path.join(train_args.output_dir, "eval_predictions.txt"),
preds_list
)
# Predict
if train_args.do_predict:
test_dataset = RoleDataset(
data_file=os.path.join(data_args.data_dir, "test.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
output = trainer.predict(test_dataset)
predictions = output['predictions']
label_ids = output['label_ids']
metrics = output["metrics"]
# Note: preds_list doesn't contain labels for [Prod] and [/Prod]
preds_list = [[label_map[x] for x in seq] for seq in predictions]
output_test_results_file = os.path.join(train_args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
write_predictions(
os.path.join(data_args.data_dir, "test.txt"),
os.path.join(train_args.output_dir, "test_predictions.txt"),
preds_list
)
def predict(model_args, predict_args):
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# logger = create_logger(name="predict_role", save_dir=train_args.output_dir)
logger.info("Predict parameters %s", predict_args)
# Prepare prod-ext task
labels = get_labels(predict_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
additional_special_tokens=SPECIAL_TOKENS
)
if model_args.use_crf:
model = BertCRFForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
tagging_schema="BIO",
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler,
)
else:
model = BertForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler,
)
device = torch.device(
"cuda"
if (not predict_args.no_cuda and torch.cuda.is_available())
else "cpu"
)
model = model.to(device)
# load test dataset
test_dataset = PlainRoleDataset(
data_file=predict_args.input_file,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=predict_args.max_seq_length,
overwrite_cache=predict_args.overwrite_cache,
)
sampler = SequentialSampler(test_dataset)
data_loader = DataLoader(
test_dataset,
sampler=sampler,
batch_size=predict_args.batch_size,
collate_fn=default_data_collator
)
logger.info("***** Running Prediction *****")
logger.info(" Num examples = %d", len(data_loader.dataset))
logger.info(" Batch size = %d", predict_args.batch_size)
model.eval()
with open(predict_args.input_file, "r") as f:
all_preds = []
for inputs in tqdm(data_loader, desc="Predicting"):
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(device)
with torch.no_grad():
outputs = model(
input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
prod_start_mask=inputs['prod_start_mask'],
prod_end_mask=inputs['prod_end_mask'],
prod_mask=inputs['prod_mask'],
token_type_ids=inputs['token_type_ids']
)
logits = outputs[0]
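# decode per-token label ids; decoder_mask restricts decoding to valid (non-padding) positions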
preds = model.decode(logits, mask=inputs['decoder_mask'].bool())
preds_list = [[label_map[x] for x in seq] for seq in preds]
all_preds += preds_list
write_predictions(
predict_args.input_file,
predict_args.output_file,
all_preds,
align="plain"
)
| [
"torch.utils.data.sampler.SequentialSampler",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.5.0 | jiangfeng1124/ChemRxnExtractor | 124ea09d944abb4375be38294a74f0de4b1087fa |
1.3 | from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps, ImageEnhance
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import warnings
import scipy.ndimage.interpolation as itpl
import skimage.transform
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def _is_pil_image(img):
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See https://en.wikipedia.org/wiki/Hue for more details on Hue.
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(
'hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
# uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
def adjust_gamma(img, gamma, gain=1):
"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
I_out = 255 * gain * ((I_in / 255) ** gamma)
See https://en.wikipedia.org/wiki/Gamma_correction for more details.
Args:
img (PIL Image): PIL Image to be adjusted.
gamma (float): Non negative real number. gamma larger than 1 make the
shadows darker, while gamma smaller than 1 make dark regions
lighter.
gain (float): The constant multiplier.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
input_mode = img.mode
img = img.convert('RGB')
np_img = np.array(img, dtype=np.float32)
np_img = 255 * gain * ((np_img / 255)**gamma)
np_img = np.uint8(np.clip(np_img, 0, 255))
img = Image.fromarray(np_img, 'RGB').convert(input_mode)
return img
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
class ToTensor(object):
"""Convert a ``numpy.ndarray`` to tensor.
Converts a numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W).
"""
def __call__(self, img):
"""Convert a ``numpy.ndarray`` to tensor.
Args:
img (numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
if isinstance(img, np.ndarray):
# handle numpy array
if img.ndim == 3:
img = torch.from_numpy(img.transpose((2, 0, 1)).copy())
elif img.ndim == 2:
img = torch.from_numpy(img.copy())
else:
raise RuntimeError(
'img should be ndarray with 2 or 3 dimensions. Got {}'.
format(img.ndim))
return img
class NormalizeNumpyArray(object):
"""Normalize a ``numpy.ndarray`` with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(M1,..,Mn)`` for ``n`` channels, this transform
will normalize each channel of the input ``numpy.ndarray`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, img):
"""
Args:
img (numpy.ndarray): Image of size (H, W, C) to be normalized.
Returns:
Tensor: Normalized image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
# TODO: make efficient
print(img.shape)
for i in range(3):
img[:, :, i] = (img[:, :, i] - self.mean[i]) / self.std[i]
return img
class NormalizeTensor(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(M1,..,Mn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
if not _is_tensor_image(tensor):
raise TypeError('tensor is not a torch image.')
# TODO: make efficient
for t, m, s in zip(tensor, self.mean, self.std):
t.sub_(m).div_(s)
return tensor
class Rotate(object):
"""Rotates the given ``numpy.ndarray``.
Args:
angle (float): The rotation angle in degrees.
"""
def __init__(self, angle):
self.angle = angle
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be rotated.
Returns:
img (numpy.ndarray (C x H x W)): Rotated image.
"""
# order=0 means nearest-neighbor type interpolation
return skimage.transform.rotate(img, self.angle, resize=False, order=0)
class Resize(object):
"""Resize the the given ``numpy.ndarray`` to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, interpolation='nearest'):
assert isinstance(size, float)
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be scaled.
Returns:
img (numpy.ndarray (C x H x W)): Rescaled image.
"""
if img.ndim == 3:
return skimage.transform.rescale(img, self.size, order=0)
elif img.ndim == 2:
return skimage.transform.rescale(img, self.size, order=0)
else:
raise RuntimeError(
'img should be ndarray with 2 or 3 dimensions. Got {}'.format(
img.ndim))
class CenterCrop(object):
"""Crops the given ``numpy.ndarray`` at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for center crop.
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for center crop.
"""
h = img.shape[0]
w = img.shape[1]
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
# # randomized cropping
# i = np.random.randint(i-3, i+4)
# j = np.random.randint(j-3, j+4)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
Returns:
img (numpy.ndarray (C x H x W)): Cropped image.
"""
i, j, h, w = self.get_params(img, self.size)
"""
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
if img.ndim == 3:
return img[i:i + h, j:j + w, :]
elif img.ndim == 2:
return img[i:i + h, j:j + w]
else:
raise RuntimeError(
'img should be ndarray with 2 or 3 dimensions. Got {}'.format(
img.ndim))
class BottomCrop(object):
"""Crops the given ``numpy.ndarray`` at the bottom. Meaning - If we imagine putting a soccer goal on top of the
image, the goal parts will be cropped and only the inside will remain. Without the top & the sides of the original
image.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for bottom crop.
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for bottom crop.
"""
h = img.shape[0]
w = img.shape[1]
th, tw = output_size
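# anchor the crop to the bottom edge and center it horizontally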
i = h - th
j = int(round((w - tw) / 2.))
# randomized left and right cropping
# i = np.random.randint(i-3, i+4)
# j = np.random.randint(j-1, j+1)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
Returns:
img (numpy.ndarray (C x H x W)): Cropped image.
"""
i, j, h, w = self.get_params(img, self.size)
"""
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
if img.ndim == 3:
return img[i:i + h, j:j + w, :]
elif img.ndim == 2:
return img[i:i + h, j:j + w]
else:
raise RuntimeError(
'img should be ndarray with 2 or 3 dimensions. Got {}'.format(
img.ndim))
class Crop(object):
"""Crops the given ``numpy.ndarray`` at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, crop):
self.crop = crop
@staticmethod
def get_params(img, crop):
"""Get parameters for ``crop`` for center crop.
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for center crop.
"""
x_l, x_r, y_b, y_t = crop
h = img.shape[0]
w = img.shape[1]
assert x_l >= 0 and x_l < w
assert x_r >= 0 and x_r < w
assert y_b >= 0 and y_b < h
assert y_t >= 0 and y_t < h
assert x_l < x_r and y_b < y_t
return x_l, x_r, y_b, y_t
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be cropped.
Returns:
img (numpy.ndarray (C x H x W)): Cropped image.
"""
x_l, x_r, y_b, y_t = self.get_params(img, self.crop)
"""
x_l, x_r: left/right pixel coordinates of the crop window.
y_b, y_t: vertical pixel coordinates of the crop window (y_b < y_t).
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
if img.ndim == 3:
return img[y_b:y_t, x_l:x_r, :]
elif img.ndim == 2:
return img[y_b:y_t, x_l:x_r]
else:
raise RuntimeError(
'img should be ndarray with 2 or 3 dimensions. Got {}'.format(
img.ndim))
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
class HorizontalFlip(object):
"""Horizontally flip the given ``numpy.ndarray``.
Args:
do_flip (boolean): whether or not do horizontal flip.
"""
def __init__(self, do_flip):
self.do_flip = do_flip
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Image to be flipped.
Returns:
img (numpy.ndarray (C x H x W)): flipped image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
if self.do_flip:
return np.fliplr(img)
else:
return img
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
transforms = []
transforms.append(
Lambda(lambda img: adjust_brightness(img, brightness)))
transforms.append(Lambda(lambda img: adjust_contrast(img, contrast)))
transforms.append(
Lambda(lambda img: adjust_saturation(img, saturation)))
transforms.append(Lambda(lambda img: adjust_hue(img, hue)))
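# apply the four photometric adjustments in a random order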
np.random.shuffle(transforms)
self.transform = Compose(transforms)
def __call__(self, img):
"""
Args:
img (numpy.ndarray (C x H x W)): Input image.
Returns:
img (numpy.ndarray (C x H x W)): Color jittered image.
"""
if not (_is_numpy_image(img)):
raise TypeError('img should be ndarray. Got {}'.format(type(img)))
pil = Image.fromarray(img)
return np.array(self.transform(pil))
| [
"torch.is_tensor"
] | 1.3.1 | vision-and-sensing/Adaptive-LiDAR-Sampling | fa49901cd9662393ffc2d267633ebe0b65be0a30 |
1.7 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import _init_paths
import os
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.decode import exct_decode, agnex_ct_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform, transform_preds
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from external.nms import soft_nms # used by merge_outputs below; import path assumes the original CenterNet layout
from .base_detector import BaseDetector
class ExdetDetector(BaseDetector):
def __init__(self, opt):
super(ExdetDetector, self).__init__(opt)
self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
t_heat = output['hm_t'].sigmoid_()
l_heat = output['hm_l'].sigmoid_()
b_heat = output['hm_b'].sigmoid_()
r_heat = output['hm_r'].sigmoid_()
c_heat = output['hm_c'].sigmoid_()
torch.cuda.synchronize()
forward_time = time.time()
if self.opt.reg_offset:
dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat,
output['reg_t'], output['reg_l'],
output['reg_b'], output['reg_r'],
K=self.opt.K,
scores_thresh=self.opt.scores_thresh,
center_thresh=self.opt.center_thresh,
aggr_weight=self.opt.aggr_weight)
else:
dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, K=self.opt.K,
scores_thresh=self.opt.scores_thresh,
center_thresh=self.opt.center_thresh,
aggr_weight=self.opt.aggr_weight)
if return_time:
return output, dets, forward_time
else:
return output, dets
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
inp_height, inp_width = images.shape[2], images.shape[3]
pred_hm = np.zeros((inp_height, inp_width, 3), dtype=np.uint8)
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
parts = ['t', 'l', 'b', 'r', 'c']
for p in parts:
tag = 'hm_{}'.format(p)
pred = debugger.gen_colormap(
output[tag][i].detach().cpu().numpy(), (inp_height, inp_width))
if p != 'c':
pred_hm = np.maximum(pred_hm, pred)
else:
debugger.add_blend_img(
img, pred, 'pred_{}_{:.1f}'.format(p, scale))
debugger.add_blend_img(img, pred_hm, 'pred_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_{:.1f}'.format(scale))
for k in range(len(detection[i])):
# print('detection', detection[i, k, 4], detection[i, k])
if detection[i, k, 4] > 0.01:
# print('detection', detection[i, k, 4], detection[i, k])
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_{:.1f}'.format(scale))
def post_process(self, dets, meta, scale=1):
out_width, out_height = meta['out_width'], meta['out_height']
dets = dets.detach().cpu().numpy().reshape(2, -1, 14)
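# the second of the two copies comes from a horizontally flipped input; mirror its x-coordinates back before merging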
dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
dets = dets.reshape(1, -1, 14)
dets[0, :, 0:2] = transform_preds(
dets[0, :, 0:2], meta['c'], meta['s'], (out_width, out_height))
dets[0, :, 2:4] = transform_preds(
dets[0, :, 2:4], meta['c'], meta['s'], (out_width, out_height))
dets[:, :, 0:4] /= scale
return dets[0]
def merge_outputs(self, detections):
detections = np.concatenate(
[detection for detection in detections], axis=0).astype(np.float32)
classes = detections[..., -1]
keep_inds = (detections[:, 4] > 0)
detections = detections[keep_inds]
classes = classes[keep_inds]
results = {}
for j in range(self.num_classes):
keep_inds = (classes == j)
results[j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
soft_nms(results[j + 1], Nt=0.5, method=2)
results[j + 1] = results[j + 1][:, 0:5]
scores = np.hstack([
results[j][:, -1]
for j in range(1, self.num_classes + 1)
])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, -1] >= thresh)
results[j] = results[j][keep_inds]
return results
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='exdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='exdet')
debugger.show_all_imgs(pause=self.pause)
| [
"torch.cuda.synchronize",
"torch.no_grad"
] | 1.7.1 | vivym/OpenKS | ea380782162de2e4c1a413f37ad12b85ccb7048a |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from queue import Queue
import numpy as np
import math
from ..util import box_ops
from ..util.misc import accuracy, get_world_size, is_dist_avail_and_initialized
def focal_loss(preds, gts, alpha, gamma):
pos_inds = gts.gt(0).float()
neg_inds = gts.eq(0).float()
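# penalty-reduced focal loss: negatives near a positive peak are down-weighted by (1 - gt)^4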
neg_weights = torch.pow(1 - gts, 4)
pos_loss = torch.log(preds) * torch.pow(1 - preds, gamma) * pos_inds
neg_loss = torch.log(1 - preds) * torch.pow(preds, gamma) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
loss = 0
if num_pos == 0:
loss = loss - neg_loss
else:
if alpha is None:
alpha_pos = alpha_neg = 1.
else:
alpha_pos = float(alpha)
alpha_neg = 1. - alpha_pos
loss = loss - (alpha_pos * pos_loss + alpha_neg * neg_loss) / num_pos
return loss
class SetCriterionHOI(nn.Module):
def __init__(self, num_obj_classes, num_queries, num_verb_classes, matcher,
weight_dict, eos_coef, losses, verb_loss_type, verb_gamma, verb_alpha):
super().__init__()
self.num_obj_classes = num_obj_classes
self.num_queries = num_queries
self.num_verb_classes = num_verb_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_obj_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
assert verb_loss_type in ['bce', 'focal']
self.verb_loss_type = verb_loss_type
self.verb_gamma = verb_gamma
self.verb_alpha = verb_alpha
def loss_obj_labels(self, outputs, targets, indices, num_interactions, log=True):
assert 'pred_obj_logits' in outputs
src_logits = outputs['pred_obj_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t['obj_labels'][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_obj_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_obj_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_obj_ce': loss_obj_ce}
if log:
losses['obj_class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_obj_cardinality(self, outputs, targets, indices, num_interactions):
pred_logits = outputs['pred_obj_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v['obj_labels']) for v in targets], device=device)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'obj_cardinality_error': card_err}
return losses
def loss_verb_labels(self, outputs, targets, indices, num_interactions):
assert 'pred_verb_logits' in outputs
src_logits = outputs['pred_verb_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t['verb_labels'][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.zeros_like(src_logits)
target_classes[idx] = target_classes_o
if self.verb_loss_type == 'bce':
loss_verb_ce = F.binary_cross_entropy_with_logits(src_logits, target_classes)
# focal loss
else:
src_logits = src_logits.sigmoid()
loss_verb_ce = focal_loss(src_logits, target_classes, alpha=self.verb_alpha,
gamma=self.verb_gamma)
losses = {'loss_verb_ce': loss_verb_ce}
return losses
def loss_sub_obj_boxes(self, outputs, targets, indices, num_interactions):
assert 'pred_sub_boxes' in outputs and 'pred_obj_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_sub_boxes = outputs['pred_sub_boxes'][idx]
src_obj_boxes = outputs['pred_obj_boxes'][idx]
target_sub_boxes = torch.cat([t['sub_boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
target_obj_boxes = torch.cat([t['obj_boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
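# interactions without an annotated object have an all-zero object box; exclude them from the object box/GIoU losses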
exist_obj_boxes = (target_obj_boxes != 0).any(dim=1)
losses = {}
if src_sub_boxes.shape[0] == 0:
losses['loss_sub_bbox'] = src_sub_boxes.sum()
losses['loss_obj_bbox'] = src_obj_boxes.sum()
losses['loss_sub_giou'] = src_sub_boxes.sum()
losses['loss_obj_giou'] = src_obj_boxes.sum()
else:
loss_sub_bbox = F.l1_loss(src_sub_boxes, target_sub_boxes, reduction='none')
loss_obj_bbox = F.l1_loss(src_obj_boxes, target_obj_boxes, reduction='none')
losses['loss_sub_bbox'] = loss_sub_bbox.sum() / num_interactions
losses['loss_obj_bbox'] = (loss_obj_bbox * exist_obj_boxes.unsqueeze(1)).sum() / (exist_obj_boxes.sum() + 1e-4)
loss_sub_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_sub_boxes),
box_ops.box_cxcywh_to_xyxy(target_sub_boxes)))
loss_obj_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_obj_boxes),
box_ops.box_cxcywh_to_xyxy(target_obj_boxes)))
losses['loss_sub_giou'] = loss_sub_giou.sum() / num_interactions
losses['loss_obj_giou'] = (loss_obj_giou * exist_obj_boxes).sum() / (exist_obj_boxes.sum() + 1e-4)
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num, **kwargs):
loss_map = {
'obj_labels': self.loss_obj_labels,
'obj_cardinality': self.loss_obj_cardinality,
'verb_labels': self.loss_verb_labels,
'sub_obj_boxes': self.loss_sub_obj_boxes,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num, **kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
num_interactions = sum(len(t['obj_labels']) for t in targets)
num_interactions = torch.as_tensor([num_interactions], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_interactions)
num_interactions = torch.clamp(num_interactions / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_interactions))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss == 'obj_labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_interactions, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def build_criterion(args, matcher, weight_dict, num_classes):
if args.hoi:
losses = ['obj_labels', 'verb_labels', 'sub_obj_boxes', 'obj_cardinality']
if args.use_matching:
raise ValueError('not implemented!')
return SetCriterionHOI(
args.num_obj_classes,
args.num_queries,
args.num_verb_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=args.eos_coef,
losses=losses,
verb_loss_type=args.verb_loss_type,
verb_gamma=args.verb_gamma,
verb_alpha=args.verb_alpha)
raise ValueError('not implemented!')
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.cat",
"torch.nn.functional.l1_loss",
"torch.no_grad",
"torch.ones",
"torch.full_like",
"torch.full",
"torch.distributed.all_reduce",
"torch.zeros_like",
"torch.log",
"torch.pow"
] | 1.7.1 | vivym/OpenKS | ea380782162de2e4c1a413f37ad12b85ccb7048a |
3 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.common.workaround import _safe_det_3x3
class TestSafeDet3x3(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
np.random.seed(42)
def _test_det_3x3(self, batch_size, device):
t = torch.rand((batch_size, 3, 3), dtype=torch.float32, device=device)
actual_det = _safe_det_3x3(t)
expected_det = t.det()
self.assertClose(actual_det, expected_det, atol=1e-7)
def test_empty_batch(self):
self._test_det_3x3(0, torch.device("cpu"))
self._test_det_3x3(0, torch.device("cuda:0"))
def test_manual(self):
t = torch.Tensor(
[
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[2, -5, 3], [0, 7, -2], [-1, 4, 1]],
[[6, 1, 1], [4, -2, 5], [2, 8, 7]],
]
).to(dtype=torch.float32)
expected_det = torch.Tensor([1, 41, -306]).to(dtype=torch.float32)
self.assertClose(_safe_det_3x3(t), expected_det)
device_cuda = torch.device("cuda:0")
self.assertClose(
_safe_det_3x3(t.to(device=device_cuda)), expected_det.to(device=device_cuda)
)
def test_regression(self):
tries = 32
device_cpu = torch.device("cpu")
device_cuda = torch.device("cuda:0")
batch_sizes = np.random.randint(low=1, high=128, size=tries)
for batch_size in batch_sizes:
self._test_det_3x3(batch_size, device_cpu)
self._test_det_3x3(batch_size, device_cuda)
| [
"torch.rand",
"torch.device",
"torch.manual_seed",
"torch.Tensor"
] | 3 | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 |
1.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
import copy
import json
import torch
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
# Not sure whether this barrier should be applied only during training or also during evaluation.
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
pad_token_label_id=pad_token_label_id,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
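# note: the 'and False' below keeps this second barrier permanently disabled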
if args.local_rank == 0 and False:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
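# only the first sub-token of each word keeps the real label; the remaining sub-tokens get pad_token_label_id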
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_added_tokens()
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] | [
"torch.save",
"torch.tensor",
"torch.load",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset"
] | 1.4.0 | yeongjoon/NER | d2c93597726ed9507bfddea9197007d30aeaad8b |
1.5 | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pdb
from .layers import *
import torch
import torch.nn as nn
class Speech2Gesture_G(nn.Module):
'''
Baseline: http://people.eecs.berkeley.edu/~shiry/projects/speech2gesture/
input_shape: (N, time, frequency)
output_shape: (N, time, pose_feats)
'''
def __init__(self, time_steps=64, in_channels=256, out_feats=104, p=0):
super(Speech2Gesture_G, self).__init__()
self.audio_encoder = AudioEncoder(output_feats = time_steps, p=p)
self.unet = UNet1D(input_channels = in_channels, output_channels = in_channels, p=p)
self.decoder = nn.Sequential(*nn.ModuleList([ConvNormRelu(in_channels, in_channels,
type='1d', leaky=True, downsample=False,
p=p)
for i in range(4)]))
self.logits = nn.Conv1d(in_channels, out_feats, kernel_size=1, stride=1)
def forward(self, x, y, time_steps=None, **kwargs):
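# x: audio features of shape (N, time, frequency); a channel dimension is added before the audio encoder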
if x.dim() == 3:
x = x.unsqueeze(dim=1)
x = self.audio_encoder(x, time_steps)
x = self.unet(x)
x = self.decoder(x)
x = self.logits(x)
internal_losses = []
return x.transpose(-1, -2), internal_losses
class Speech2Gesture_D(nn.Module):
'''
Baseline: http://people.eecs.berkeley.edu/~shiry/projects/speech2gesture/
input_shape: (N, time, pose_feats)
output_shape: (N, *, 1) ## discriminator scores
'''
def __init__(self, in_channels=104, out_channels=64, n_downsampling=2, p=0, groups=1, **kwargs):
super(Speech2Gesture_D, self).__init__()
self.conv1 = nn.Sequential(torch.nn.Conv1d(in_channels*groups, out_channels*groups, 4, 2, padding=1, groups=groups),
torch.nn.LeakyReLU(negative_slope=0.2))
self.conv2 = nn.ModuleList([])
for n in range(1, n_downsampling):
ch_mul = min(2**n, 8)
self.conv2.append(ConvNormRelu(out_channels, out_channels*ch_mul,
type='1d', downsample=True, leaky=True, p=p, groups=groups))
self.conv2 = nn.Sequential(*self.conv2)
ch_mul_new = min(2**n_downsampling, 8)
self.conv3 = ConvNormRelu(out_channels*ch_mul, out_channels*ch_mul_new,
type='1d', leaky=True, kernel_size=4, stride=1, p=p, groups=groups)
out_shape = 1 if 'out_shape' not in kwargs else kwargs['out_shape']
self.logits = nn.Conv1d(out_channels*ch_mul_new*groups, out_shape*groups, kernel_size=4, stride=1, groups=groups)
def forward(self, x):
x = x.transpose(-1, -2)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.logits(x)
internal_losses = []
return x.transpose(-1, -2).squeeze(dim=-1), internal_losses
| [
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"torch.nn.Conv1d"
] | 1.5.0 | chahuja/mix-stage | 6f47626ce46bd9b28c45d1255b328b17b3650c4f |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
cached_path,
hf_bucket_url,
is_remote_url,
is_torch_tpu_available,
replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
logger = logging.get_logger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive."""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
def find_pruneable_heads_and_indices(
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
"""
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def dtype(self) -> dtype:
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
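# A minimal usage sketch (assuming an already-instantiated ``model`` running in float32):
#     mask = torch.tensor([[1, 1, 0]])              # hypothetical 2D padding mask
#     bias = model.invert_attention_mask(mask)      # shape (1, 1, 1, 3)
#     # kept positions become 0.0, masked positions become -1e9, ready to add to attention scores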
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
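# A minimal sketch of the encoder (non-decoder) path, assuming a float32 ``model``:
#     attention_mask = torch.ones(2, 5)             # hypothetical (batch_size, seq_length) mask
#     ext = model.get_extended_attention_mask(attention_mask, (2, 5), attention_mask.device)
#     # ext has shape (2, 1, 1, 5); kept positions are 0.0 and masked positions are -10000.0,
#     # so it can simply be added to the raw attention scores before the softmax.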
def get_head_mask(
self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the attention scores are computed by chunks.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility
return head_mask
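# A minimal sketch, assuming a hypothetical 12-layer, 12-head model:
#     head_mask = torch.ones(12)                    # 1D mask, one entry per head (1.0 = keep)
#     head_mask[0] = 0.0                            # drop head 0 in every layer
#     mask_5d = model.get_head_mask(head_mask, num_hidden_layers=12)
#     # mask_5d has shape (12, 1, 12, 1, 1) and is broadcast onto the attention probabilities.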
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
:obj:`int`: The number of parameters.
"""
# Note: parameters are never instances of nn.Embedding (they are tensors), so filtering on the
# parameters themselves cannot exclude embeddings; embedding weights are located by walking the
# named modules instead.
if exclude_embeddings:
embedding_param_names = [
f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
]
non_embedding_parameters = [
parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
]
return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
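# A minimal sketch of the different parameter counts on any model instance:
#     total = model.num_parameters()
#     trainable = model.num_parameters(only_trainable=True)
#     without_embeddings = model.num_parameters(exclude_embeddings=True)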
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
"""
Helper function to estimate the total number of tokens from the model inputs.
Args:
input_dict (:obj:`dict`): The model inputs.
Returns:
:obj:`int`: The total number of tokens.
"""
token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
if token_inputs:
return sum([token_input.numel() for token_input in token_inputs])
else:
warnings.warn(
"Could not estimate the number of tokens of the input, floating-point operations will not be computed"
)
return 0
def floating_point_ops(
self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
) -> int:
"""
Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
<https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.
Args:
input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The model inputs, used to estimate the total number of tokens in the batch.
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to count embedding and softmax operations.
Returns:
:obj:`int`: The number of floating-point operations.
"""
return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
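# A minimal sketch of the 6 * tokens * parameters estimate, assuming a hypothetical batch:
#     inputs = {"input_ids": torch.ones(8, 128, dtype=torch.long)}
#     flops = model.floating_point_ops(inputs)
#     # equals 6 * (8 * 128) * model.num_parameters(exclude_embeddings=True)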
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
- **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re pattern of tensor names to ignore
when loading the model (and avoid unnecessary warnings).
- **keys_to_never_save** (:obj:`Optional[List[str]]`) -- A list of tensor names to ignore when saving the
model (useful for keys that aren't trained, but which are deterministic).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
authorized_unexpected_keys = None
keys_to_never_save = None
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
"""
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@property
def base_model(self) -> nn.Module:
"""
:obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: nn.Module):
"""
Set model's input embeddings.
Args:
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None and self.config.tie_word_embeddings:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
if hasattr(self, self.base_model_prefix):
self = getattr(self, self.base_model_prefix)
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: List[str] = []
if decoder.__class__ != encoder.__class__:
logger.info(
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
)
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
encoder_modules
) != len(decoder_modules):
# this can happen if the name corresponds to the position in a module list (nn.ModuleList) of layers
# in this case the decoder has added a cross-attention that the encoder does not have
# thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
if len(uninitialized_encoder_weights) > 0:
logger.warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
"""Tie or clone module weights depending of whether we are using TorchScript or not"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(
0,
output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
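# A minimal sketch, e.g. after adding new tokens to a tokenizer (names are illustrative):
#     tokenizer.add_tokens(["<new_token>"])
#     model.resize_token_embeddings(len(tokenizer))
#     # config.vocab_size is updated and the output embeddings are re-tied if the model ties weights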
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
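# A minimal sketch: prune heads 0 and 2 of layer 1 and heads 2 and 3 of layer 2.
#     model.prune_heads({1: [0, 2], 2: [2, 3]})
#     # the pruned heads are also recorded in model.config.pruned_heads, so a
#     # save_pretrained / from_pretrained round trip keeps the pruned architecture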
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self.keys_to_never_save is not None:
state_dict = {k: v for k, v in state_dict.items() if k not in self.keys_to_never_save}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False) and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(state_dict, output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(state_dict, output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import BertConfig, BertModel
>>> # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = BertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
revision=revision,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
f"at '{resolved_archive_file}'"
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict,
prefix,
local_metadata,
True,
missing_keys,
unexpected_keys,
error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls.authorized_unexpected_keys is not None:
for pat in cls.authorized_unexpected_keys:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model.to(xm.xla_device())
return model
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`): The number of output features.
nx (:obj:`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
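# A minimal sketch: Conv1D behaves like a linear layer with transposed weights (hypothetical sizes):
#     conv = Conv1D(nf=12, nx=4)
#     out = conv(torch.randn(2, 7, 4))     # (batch, seq, nx) -> (batch, seq, nf) == (2, 7, 12)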
class PoolerStartLogits(nn.Module):
"""
Compute SQuAD start logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(
self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the token
should be masked.
Returns:
:obj:`torch.FloatTensor`: The start logits for SQuAD.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerEndLogits(nn.Module):
"""
Compute SQuAD end logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the token
should be masked.
.. note::
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The end logits for SQuAD.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
.. note::
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
@dataclass
class SquadHeadOutput(ModelOutput):
"""
Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification
losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
(beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
class SQuADHead(nn.Module):
r"""
A SQuAD head inspired by XLNet.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
@replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
def forward(
self,
hidden_states: torch.FloatTensor,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
is_impossible: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
return_dict: bool = False,
) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
Final hidden states of the model on the sequence tokens.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the first token for the labeled span.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the last token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Whether the question has a possible answer in the paragraph or not.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the token
should be masked.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
"""
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
if not return_dict:
return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
else:
return SquadHeadOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
class SequenceSummary(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
- **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
activation.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else Identity()
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
token.
Returns:
:obj:`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
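# A minimal sketch, assuming a GPT-2-style config with summary_type == "cls_index":
#     summary = SequenceSummary(config)                       # hypothetical config instance
#     pooled = summary(hidden_states, cls_index=cls_index)    # (batch, seq, hidden) -> (batch, num_labels or hidden)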
def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
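# A minimal sketch: keep only output rows 0 and 2 of a small linear layer (illustrative sizes):
#     layer = nn.Linear(4, 3)
#     pruned = prune_linear_layer(layer, torch.tensor([0, 2]), dim=0)
#     # pruned is an nn.Linear(4, 2) whose weight and bias are copies of the kept entries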
def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
"""
Prune a Conv1D layer to keep only entries in index. A Conv1D works like a linear layer (see e.g. BERT) but the
weights are transposed.
Used to remove heads.
Args:
layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.
Returns:
:class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(
layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
"""
Prune a Conv1D or linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
:obj:`requires_grad=True`.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
def apply_chunking_to_forward(
forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked
Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape[chunk_dim]
assert all(
input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
    # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
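# Illustrative sketch (not part of the original module): for a position-wise function, chunking over
# the sequence dimension gives the same output as the direct call. The names below are hypothetical.
def _example_apply_chunking_to_forward():
    dense = nn.Linear(8, 8)
    hidden_states = torch.ones(2, 6, 8)  # (bsz, seq_len, hidden_size)
    chunked = apply_chunking_to_forward(dense.forward, 2, 1, hidden_states)  # chunk_size=2 over dim 1
    assert torch.allclose(chunked, dense(hidden_states))
    return chunked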
| [
"torch.nn.Linear",
"torch.cat",
"torch.einsum",
"torch.nn.Parameter",
"torch.ones",
"torch.load",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.pad",
"torch.nn.CrossEntropyLoss",
"torch.topk",
"torch.nn.LayerNorm",
"torch.gather",
"torch.is_tensor",
"torch.nn.init.normal_",
"torch.tensor",
"torch.empty",
"torch.zeros",
"torch.nn.Identity",
"torch.nn.Tanh",
"torch.save",
"torch.full_like",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.Embedding"
] | 1.0 | cbrochtrup/transformers | c89bdfbe720bc8f41c7dc6db5473a2cb0955f224 |
1.3 | import os
from distutils.version import LooseVersion
from importlib.util import find_spec
from typing import Optional, Union
from unittest.mock import patch
import pytest
import torch
from pytest import approx
from torch.nn import Linear
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import (
create_supervised_evaluator,
create_supervised_trainer,
supervised_evaluation_step,
supervised_evaluation_step_amp,
supervised_training_step_tpu,
)
from ignite.metrics import MeanSquaredError
def _test_create_supervised_trainer(
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
model = Linear(1, 1)
if model_device:
model.to(model_device)
model.weight.data.zero_()
model.bias.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
if amp_mode == "apex" and model_device == trainer_device == "cuda":
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
trainer = create_supervised_trainer(
model,
optimizer,
mse_loss,
device=trainer_device,
output_transform=lambda x, y, y_pred, loss: (y_pred, loss.item()),
amp_mode=amp_mode,
scaler=scaler,
)
x = torch.tensor([[0.1], [0.2]])
y = torch.tensor([[0.3], [0.5]])
data = [(x, y)]
assert model.weight.data[0, 0].item() == approx(0.0)
assert model.bias.item() == approx(0.0)
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
state = trainer.run(data)
assert state.output[-1] == approx(0.17), state.output[-1]
assert round(model.weight.data[0, 0].item(), 3) == approx(0.013), model.weight.item()
assert round(model.bias.item(), 3) == approx(0.08), model.bias.item()
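        # Note added for clarity (hand-derived, not from the original test): with w = b = 0 the
        # predictions are 0, so MSE loss = ((0 - 0.3) ** 2 + (0 - 0.5) ** 2) / 2 = 0.17. One SGD step
        # with lr=0.1 uses dL/dw = (-0.3 * 0.1) + (-0.5 * 0.2) = -0.13 and dL/db = -0.3 + (-0.5) = -0.8,
        # giving w = 0.013 and b = 0.08, which is what the assertions above check.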
if amp_mode == "amp":
assert state.output[0].dtype is torch.half
if scaler and isinstance(scaler, bool):
assert hasattr(state, "scaler")
else:
assert not hasattr(state, "scaler")
else:
if LooseVersion(torch.__version__) >= LooseVersion("1.7.0"):
            # This is broken in 1.6.0 but will probably be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"is on CPU, but expected them to be on GPU"):
trainer.run(data)
def _test_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
model = Linear(1, 1)
if model_device:
model.to(model_device)
model.weight.data.zero_()
model.bias.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
evaluator = create_supervised_evaluator(model, device=evaluator_device, amp_mode=amp_mode)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
state = evaluator.run(data)
y_pred, y = state.output
assert y_pred[0, 0].item() == approx(0.0)
assert y_pred[1, 0].item() == approx(0.0)
assert y[0, 0].item() == approx(3.0)
assert y[1, 0].item() == approx(5.0)
assert model.weight.data[0, 0].item() == approx(0.0)
assert model.bias.item() == approx(0.0)
else:
if LooseVersion(torch.__version__) >= LooseVersion("1.7.0"):
            # This is broken in 1.6.0 but will probably be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"is on CPU, but expected them to be on GPU"):
evaluator.run(data)
def test_create_supervised_trainer():
_test_create_supervised_trainer()
def test_create_supervised_trainer_with_cpu():
_test_create_supervised_trainer(trainer_device="cpu")
def test_create_supervised_trainer_traced_with_cpu():
_test_create_supervised_trainer(trainer_device="cpu", trace=True)
@pytest.mark.skipif(find_spec("apex"), reason="Skip if APEX is installed")
def test_create_supervised_trainer_apex_error():
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer(amp_mode="apex")
@pytest.fixture
def mock_torch_cuda_amp_module():
with patch.dict(
"sys.modules",
{"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
):
yield torch
def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer(amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use scaler argument."):
_test_create_supervised_trainer(amp_mode="amp", scaler=True)
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
def test_create_supervised_trainer_scaler_not_amp():
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=scaler)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=True)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=True)
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=scaler)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp_scaler():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=True
)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
_test_create_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=scaler
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
@pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
def test_create_supervised_trainer_on_cuda_apex():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex")
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_supervised_training_step_tpu_no_xla():
with pytest.raises(ModuleNotFoundError, match="torch_xla cannot be imported, please install PyTorch XLA."):
supervised_training_step_tpu(model=None, optimizer=None, loss_fn=None)
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_no_xla():
model_device = "cpu"
trainer_device = "xla"
with pytest.raises(RuntimeError, match=r"In order to run on TPU, please install PyTorch XLA"):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu():
model_device = trainer_device = "xla"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_amp():
model_device = trainer_device = "xla"
with pytest.raises(ValueError, match="amp_mode cannot be used with xla device."):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_with_model_on_cpu():
_test_create_supervised_trainer(trainer_device="cuda")
def test_create_supervised_evaluator():
_test_create_supervised_evaluator()
def test_create_supervised_evaluator_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu")
def test_create_supervised_evaluator_traced_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu", trace=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_with_model_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cuda")
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_amp():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
def test_create_supervised_evaluator_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_evaluator(amp_mode="amp")
def test_create_supervised_evaluator_with_metrics():
model = Linear(1, 1)
model.weight.data.zero_()
model.bias.data.zero_()
evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [4.0]])
data = [(x, y)]
state = evaluator.run(data)
assert state.metrics["mse"] == 12.5
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.tensor",
"torch.jit.trace",
"torch.randn"
] | 1.3 | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 |