====================================================================================================================
SOURCE CODE FILE: lsun.py
LINES: 1
SIZE: 5.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\lsun.py
ENCODING: utf-8
```py
import io
import os.path
import pickle
import string
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Callable, cast, List, Optional, Tuple, Union
from PIL import Image
from .utils import iterable_to_str, verify_str_arg
from .vision import VisionDataset
class LSUNClass(VisionDataset):
def __init__(
self, root: str, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None
) -> None:
import lmdb
super().__init__(root, transform=transform, target_transform=target_transform)
self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()["entries"]
cache_file = "_cache_" + "".join(c for c in root if c in string.ascii_letters)
        if os.path.isfile(cache_file):
            with open(cache_file, "rb") as f:
                self.keys = pickle.load(f)
        else:
            with self.env.begin(write=False) as txn:
                self.keys = [key for key in txn.cursor().iternext(keys=True, values=False)]
            with open(cache_file, "wb") as f:
                pickle.dump(self.keys, f)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
img, target = None, None
env = self.env
with env.begin(write=False) as txn:
imgbuf = txn.get(self.keys[index])
buf = io.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert("RGB")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return self.length
class LSUN(VisionDataset):
"""`LSUN <https://paperswithcode.com/dataset/lsun>`_ dataset.
You will need to install the ``lmdb`` package to use this dataset: run
``pip install lmdb``
Args:
root (str or ``pathlib.Path``): Root directory for the database files.
classes (string or list): One of {'train', 'val', 'test'} or a list of
            categories to load, e.g. ['bedroom_train', 'church_outdoor_train'].
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
def __init__(
self,
root: Union[str, Path],
classes: Union[str, List[str]] = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.classes = self._verify_classes(classes)
        # for each class, create an LSUNClass dataset
self.dbs = []
for c in self.classes:
self.dbs.append(LSUNClass(root=os.path.join(root, f"{c}_lmdb"), transform=transform))
self.indices = []
count = 0
for db in self.dbs:
count += len(db)
self.indices.append(count)
self.length = count
def _verify_classes(self, classes: Union[str, List[str]]) -> List[str]:
categories = [
"bedroom",
"bridge",
"church_outdoor",
"classroom",
"conference_room",
"dining_room",
"kitchen",
"living_room",
"restaurant",
"tower",
]
dset_opts = ["train", "val", "test"]
try:
classes = cast(str, classes)
verify_str_arg(classes, "classes", dset_opts)
if classes == "test":
classes = [classes]
else:
classes = [c + "_" + classes for c in categories]
except ValueError:
if not isinstance(classes, Iterable):
msg = "Expected type str or Iterable for argument classes, but got type {}."
raise ValueError(msg.format(type(classes)))
classes = list(classes)
msg_fmtstr_type = "Expected type str for elements in argument classes, but got type {}."
for c in classes:
verify_str_arg(c, custom_msg=msg_fmtstr_type.format(type(c)))
c_short = c.split("_")
category, dset_opt = "_".join(c_short[:-1]), c_short[-1]
msg_fmtstr = "Unknown value '{}' for {}. Valid values are {{{}}}."
msg = msg_fmtstr.format(category, "LSUN class", iterable_to_str(categories))
verify_str_arg(category, valid_values=categories, custom_msg=msg)
msg = msg_fmtstr.format(dset_opt, "postfix", iterable_to_str(dset_opts))
verify_str_arg(dset_opt, valid_values=dset_opts, custom_msg=msg)
return classes
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target) where target is the index of the target category.
"""
target = 0
sub = 0
for ind in self.indices:
if index < ind:
break
target += 1
sub = ind
db = self.dbs[target]
index = index - sub
if self.target_transform is not None:
target = self.target_transform(target)
img, _ = db[index]
return img, target
def __len__(self) -> int:
return self.length
def extra_repr(self) -> str:
return "Classes: {classes}".format(**self.__dict__)
```
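A minimal usage sketch for the classes above, assuming the ``lmdb`` package is installed and that an LSUN category archive has already been downloaded and unpacked; the ``./data`` root and the ``church_outdoor_train`` category are illustrative assumptions, not part of the module.
```py
# Hedged sketch: ./data/church_outdoor_train_lmdb is assumed to exist already;
# LSUN does not download the lmdb archives for you.
from torchvision import transforms
from torchvision.datasets import LSUN

dataset = LSUN(
    root="./data",                        # must contain ./data/church_outdoor_train_lmdb
    classes=["church_outdoor_train"],     # or simply "train" / "val" / "test"
    transform=transforms.Compose([transforms.CenterCrop(256), transforms.ToTensor()]),
)
img, target = dataset[0]                  # target indexes the entry in `classes`
print(len(dataset), img.shape, target)
```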
=====================================================================================================================
SOURCE CODE FILE: mnist.py
LINES: 4
SIZE: 21.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\mnist.py
ENCODING: utf-8
```py
import codecs
import os
import os.path
import shutil
import string
import sys
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.error import URLError
import numpy as np
import torch
from PIL import Image
from .utils import _flip_byte_order, check_integrity, download_and_extract_archive, extract_archive, verify_str_arg
from .vision import VisionDataset
class MNIST(VisionDataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``MNIST/raw/train-images-idx3-ubyte``
and ``MNIST/raw/t10k-images-idx3-ubyte`` exist.
train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
otherwise from ``t10k-images-idx3-ubyte``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
mirrors = [
"https://ossci-datasets.s3.amazonaws.com/mnist/",
"http://yann.lecun.com/exdb/mnist/",
]
resources = [
("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c"),
]
training_file = "training.pt"
test_file = "test.pt"
classes = [
"0 - zero",
"1 - one",
"2 - two",
"3 - three",
"4 - four",
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(
self,
root: Union[str, Path],
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.train = train # training set or test set
if self._check_legacy_exist():
self.data, self.targets = self._load_legacy_data()
return
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self.data, self.targets = self._load_data()
def _check_legacy_exist(self):
processed_folder_exists = os.path.exists(self.processed_folder)
if not processed_folder_exists:
return False
return all(
check_integrity(os.path.join(self.processed_folder, file)) for file in (self.training_file, self.test_file)
)
def _load_legacy_data(self):
# This is for BC only. We no longer cache the data in a custom binary, but simply read from the raw data
# directly.
data_file = self.training_file if self.train else self.test_file
return torch.load(os.path.join(self.processed_folder, data_file), weights_only=True)
def _load_data(self):
image_file = f"{'train' if self.train else 't10k'}-images-idx3-ubyte"
data = read_image_file(os.path.join(self.raw_folder, image_file))
label_file = f"{'train' if self.train else 't10k'}-labels-idx1-ubyte"
targets = read_label_file(os.path.join(self.raw_folder, label_file))
return data, targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def raw_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, "raw")
@property
def processed_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, "processed")
@property
def class_to_idx(self) -> Dict[str, int]:
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self) -> bool:
return all(
check_integrity(os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0]))
for url, _ in self.resources
)
def download(self) -> None:
"""Download the MNIST data if it doesn't exist already."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
# download files
for filename, md5 in self.resources:
errors = []
for mirror in self.mirrors:
url = f"{mirror}{filename}"
try:
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
except URLError as e:
errors.append(e)
continue
break
else:
s = f"Error downloading {filename}:\n"
for mirror, err in zip(self.mirrors, errors):
s += f"Tried {mirror}, got:\n{str(err)}\n"
raise RuntimeError(s)
def extra_repr(self) -> str:
split = "Train" if self.train is True else "Test"
return f"Split: {split}"
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``FashionMNIST/raw/train-images-idx3-ubyte``
and ``FashionMNIST/raw/t10k-images-idx3-ubyte`` exist.
train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
otherwise from ``t10k-images-idx3-ubyte``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
mirrors = ["http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"]
resources = [
("train-images-idx3-ubyte.gz", "8d4fb7e6c68d591d4c3dfef9ec88bf0d"),
("train-labels-idx1-ubyte.gz", "25c81989df183df01b3e8a0aad5dffbe"),
("t10k-images-idx3-ubyte.gz", "bef4ecab320f06d8554ea6380940ec79"),
("t10k-labels-idx1-ubyte.gz", "bb300cfdad3c16e7a12a480ee83cd310"),
]
classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
class KMNIST(MNIST):
"""`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``KMNIST/raw/train-images-idx3-ubyte``
and ``KMNIST/raw/t10k-images-idx3-ubyte`` exist.
train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
otherwise from ``t10k-images-idx3-ubyte``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
mirrors = ["http://codh.rois.ac.jp/kmnist/dataset/kmnist/"]
resources = [
("train-images-idx3-ubyte.gz", "bdb82020997e1d708af4cf47b453dcf7"),
("train-labels-idx1-ubyte.gz", "e144d726b3acfaa3e44228e80efcd344"),
("t10k-images-idx3-ubyte.gz", "5c965bf0a639b31b8f53240b1b52f4d7"),
("t10k-labels-idx1-ubyte.gz", "7320c461ea6c1c855c0b718fb2a4b134"),
]
classes = ["o", "ki", "su", "tsu", "na", "ha", "ma", "ya", "re", "wo"]
class EMNIST(MNIST):
"""`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``EMNIST/raw/train-images-idx3-ubyte``
and ``EMNIST/raw/t10k-images-idx3-ubyte`` exist.
split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
url = "https://biometrics.nist.gov/cs_links/EMNIST/gzip.zip"
md5 = "58c8d27c78d21e728a6bc7b3cc06412e"
splits = ("byclass", "bymerge", "balanced", "letters", "digits", "mnist")
    # Merged classes assume the same structure for both the uppercase and lowercase versions
_merged_classes = {"c", "i", "j", "k", "l", "m", "o", "p", "s", "u", "v", "w", "x", "y", "z"}
_all_classes = set(string.digits + string.ascii_letters)
classes_split_dict = {
"byclass": sorted(list(_all_classes)),
"bymerge": sorted(list(_all_classes - _merged_classes)),
"balanced": sorted(list(_all_classes - _merged_classes)),
"letters": ["N/A"] + list(string.ascii_lowercase),
"digits": list(string.digits),
"mnist": list(string.digits),
}
def __init__(self, root: Union[str, Path], split: str, **kwargs: Any) -> None:
self.split = verify_str_arg(split, "split", self.splits)
self.training_file = self._training_file(split)
self.test_file = self._test_file(split)
super().__init__(root, **kwargs)
self.classes = self.classes_split_dict[self.split]
@staticmethod
def _training_file(split) -> str:
return f"training_{split}.pt"
@staticmethod
def _test_file(split) -> str:
return f"test_{split}.pt"
@property
def _file_prefix(self) -> str:
return f"emnist-{self.split}-{'train' if self.train else 'test'}"
@property
def images_file(self) -> str:
return os.path.join(self.raw_folder, f"{self._file_prefix}-images-idx3-ubyte")
@property
def labels_file(self) -> str:
return os.path.join(self.raw_folder, f"{self._file_prefix}-labels-idx1-ubyte")
def _load_data(self):
return read_image_file(self.images_file), read_label_file(self.labels_file)
def _check_exists(self) -> bool:
return all(check_integrity(file) for file in (self.images_file, self.labels_file))
def download(self) -> None:
"""Download the EMNIST data if it doesn't exist already."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
download_and_extract_archive(self.url, download_root=self.raw_folder, md5=self.md5)
gzip_folder = os.path.join(self.raw_folder, "gzip")
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith(".gz"):
extract_archive(os.path.join(gzip_folder, gzip_file), self.raw_folder)
shutil.rmtree(gzip_folder)
class QMNIST(MNIST):
"""`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset whose ``raw``
subdir contains binary files of the datasets.
what (string,optional): Can be 'train', 'test', 'test10k',
'test50k', or 'nist' for respectively the mnist compatible
training set, the 60k qmnist testing set, the 10k qmnist
examples that match the mnist testing set, the 50k
remaining qmnist testing examples, or all the nist
digits. The default is to select 'train' or 'test'
according to the compatibility argument 'train'.
compat (bool,optional): A boolean that says whether the target
for each example is class number (for compatibility with
the MNIST dataloader) or a torch vector containing the
full qmnist information. Default=True.
train (bool,optional,compatibility): When argument 'what' is
not specified, this boolean decides whether to load the
training set or the testing set. Default: True.
download (bool, optional): If True, downloads the dataset from
the internet and puts it in root directory. If dataset is
already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that
takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform
that takes in the target and transforms it.
"""
subsets = {"train": "train", "test": "test", "test10k": "test", "test50k": "test", "nist": "nist"}
resources: Dict[str, List[Tuple[str, str]]] = { # type: ignore[assignment]
"train": [
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz",
"ed72d4157d28c017586c42bc6afe6370",
),
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz",
"0058f8dd561b90ffdd0f734c6a30e5e4",
),
],
"test": [
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz",
"1394631089c404de565df7b7aeaf9412",
),
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz",
"5b5b05890a5e13444e108efe57b788aa",
),
],
"nist": [
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz",
"7f124b3b8ab81486c9d8c2749c17f834",
),
(
"https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz",
"5ed0e788978e45d4a8bd4b7caec3d79d",
),
],
}
classes = [
"0 - zero",
"1 - one",
"2 - two",
"3 - three",
"4 - four",
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
def __init__(
self, root: Union[str, Path], what: Optional[str] = None, compat: bool = True, train: bool = True, **kwargs: Any
) -> None:
if what is None:
what = "train" if train else "test"
self.what = verify_str_arg(what, "what", tuple(self.subsets.keys()))
self.compat = compat
self.data_file = what + ".pt"
self.training_file = self.data_file
self.test_file = self.data_file
super().__init__(root, train, **kwargs)
@property
def images_file(self) -> str:
(url, _), _ = self.resources[self.subsets[self.what]]
return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])
@property
def labels_file(self) -> str:
_, (url, _) = self.resources[self.subsets[self.what]]
return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])
def _check_exists(self) -> bool:
return all(check_integrity(file) for file in (self.images_file, self.labels_file))
def _load_data(self):
data = read_sn3_pascalvincent_tensor(self.images_file)
if data.dtype != torch.uint8:
raise TypeError(f"data should be of dtype torch.uint8 instead of {data.dtype}")
if data.ndimension() != 3:
raise ValueError("data should have 3 dimensions instead of {data.ndimension()}")
targets = read_sn3_pascalvincent_tensor(self.labels_file).long()
if targets.ndimension() != 2:
raise ValueError(f"targets should have 2 dimensions instead of {targets.ndimension()}")
if self.what == "test10k":
data = data[0:10000, :, :].clone()
targets = targets[0:10000, :].clone()
elif self.what == "test50k":
data = data[10000:, :, :].clone()
targets = targets[10000:, :].clone()
return data, targets
def download(self) -> None:
"""Download the QMNIST data if it doesn't exist already.
Note that we only download what has been asked for (argument 'what').
"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
split = self.resources[self.subsets[self.what]]
for url, md5 in split:
download_and_extract_archive(url, self.raw_folder, md5=md5)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
# redefined to handle the compat flag
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img.numpy(), mode="L")
if self.transform is not None:
img = self.transform(img)
if self.compat:
target = int(target[0])
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def extra_repr(self) -> str:
return f"Split: {self.what}"
def get_int(b: bytes) -> int:
return int(codecs.encode(b, "hex"), 16)
SN3_PASCALVINCENT_TYPEMAP = {
8: torch.uint8,
9: torch.int8,
11: torch.int16,
12: torch.int32,
13: torch.float32,
14: torch.float64,
}
def read_sn3_pascalvincent_tensor(path: str, strict: bool = True) -> torch.Tensor:
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# read
with open(path, "rb") as f:
data = f.read()
# parse
if sys.byteorder == "little" or sys.platform == "aix":
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
else:
nd = get_int(data[0:1])
ty = get_int(data[1:2]) + get_int(data[2:3]) * 256 + get_int(data[3:4]) * 256 * 256
assert 1 <= nd <= 3
assert 8 <= ty <= 14
torch_type = SN3_PASCALVINCENT_TYPEMAP[ty]
s = [get_int(data[4 * (i + 1) : 4 * (i + 2)]) for i in range(nd)]
if sys.byteorder == "big" and not sys.platform == "aix":
for i in range(len(s)):
s[i] = int.from_bytes(s[i].to_bytes(4, byteorder="little"), byteorder="big", signed=False)
parsed = torch.frombuffer(bytearray(data), dtype=torch_type, offset=(4 * (nd + 1)))
# The MNIST format uses the big endian byte order, while `torch.frombuffer` uses whatever the system uses. In case
# that is little endian and the dtype has more than one byte, we need to flip them.
if sys.byteorder == "little" and parsed.element_size() > 1:
parsed = _flip_byte_order(parsed)
assert parsed.shape[0] == np.prod(s) or not strict
return parsed.view(*s)
def read_label_file(path: str) -> torch.Tensor:
x = read_sn3_pascalvincent_tensor(path, strict=False)
if x.dtype != torch.uint8:
raise TypeError(f"x should be of dtype torch.uint8 instead of {x.dtype}")
if x.ndimension() != 1:
raise ValueError(f"x should have 1 dimension instead of {x.ndimension()}")
return x.long()
def read_image_file(path: str) -> torch.Tensor:
x = read_sn3_pascalvincent_tensor(path, strict=False)
if x.dtype != torch.uint8:
raise TypeError(f"x should be of dtype torch.uint8 instead of {x.dtype}")
if x.ndimension() != 3:
raise ValueError(f"x should have 3 dimension instead of {x.ndimension()}")
return x
```
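The IDX reader at the bottom of this file can be exercised without downloading MNIST. A minimal sketch, assuming ``torchvision`` is importable from this environment; the synthetic buffer below is illustrative only, not a real MNIST file.
```py
# Build a tiny IDX3 (uint8, 3-D) payload in memory and parse it back with the reader above.
import struct
import tempfile

import torch
from torchvision.datasets.mnist import read_sn3_pascalvincent_tensor

def build_idx3(data: torch.Tensor) -> bytes:
    # magic number: two zero bytes, the type code (0x08 = uint8), then the number of dimensions
    header = struct.pack(">BBBB", 0, 0, 8, data.ndim)
    # one big-endian int32 per dimension, followed by the raw pixel bytes
    header += b"".join(struct.pack(">i", d) for d in data.shape)
    return header + data.numpy().tobytes()

payload = build_idx3(torch.arange(6, dtype=torch.uint8).reshape(1, 2, 3))
with tempfile.NamedTemporaryFile(suffix="-images-idx3-ubyte", delete=False) as f:
    f.write(payload)
    path = f.name

parsed = read_sn3_pascalvincent_tensor(path)
print(parsed.shape, parsed.dtype)  # torch.Size([1, 2, 3]) torch.uint8
```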
============================================================================================================================
SOURCE CODE FILE: moving_mnist.py
LINES: 1
SIZE: 3.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\moving_mnist.py
ENCODING: utf-8
```py
import os.path
from pathlib import Path
from typing import Callable, Optional, Union
import numpy as np
import torch
from torchvision.datasets.utils import download_url, verify_str_arg
from torchvision.datasets.vision import VisionDataset
class MovingMNIST(VisionDataset):
"""`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
If ``split=None``, the full data is returned.
        split_ratio (int, optional): The split ratio of the number of frames. If ``split="train"``, the first
            ``split_ratio`` frames ``data[:, :split_ratio]`` are returned. If ``split="test"``, the remaining frames
            ``data[:, split_ratio:]`` are returned. If ``split=None``, this parameter is ignored and all frames are returned.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in a torch Tensor
and returns a transformed version. E.g, ``transforms.RandomCrop``
"""
_URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
def __init__(
self,
root: Union[str, Path],
split: Optional[str] = None,
split_ratio: int = 10,
download: bool = False,
transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform)
self._base_folder = os.path.join(self.root, self.__class__.__name__)
self._filename = self._URL.split("/")[-1]
if split is not None:
verify_str_arg(split, "split", ("train", "test"))
self.split = split
if not isinstance(split_ratio, int):
raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
elif not (1 <= split_ratio <= 19):
raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
self.split_ratio = split_ratio
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
data = data[: self.split_ratio]
elif self.split == "test":
data = data[self.split_ratio :]
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
def __getitem__(self, idx: int) -> torch.Tensor:
"""
Args:
idx (int): Index
Returns:
torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.
"""
data = self.data[idx]
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self) -> int:
return len(self.data)
def _check_exists(self) -> bool:
return os.path.exists(os.path.join(self._base_folder, self._filename))
def download(self) -> None:
if self._check_exists():
return
download_url(
url=self._URL,
root=self._base_folder,
filename=self._filename,
md5="be083ec986bfe91a449d63653c411eb2",
)
```
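A short usage sketch; ``./data`` is an assumed download root, and the whole dataset lives in a single (fairly large) ``mnist_test_seq.npy`` file.
```py
# Sketch only: download=True fetches mnist_test_seq.npy once; later constructors reuse it.
from torchvision.datasets import MovingMNIST

full = MovingMNIST(root="./data", split=None, download=True)
train = MovingMNIST(root="./data", split="train", split_ratio=10)   # first 10 frames per sequence
test = MovingMNIST(root="./data", split="test", split_ratio=10)     # last 10 frames per sequence

print(full[0].shape, train[0].shape, test[0].shape)  # [20, 1, 64, 64], [10, 1, 64, 64], [10, 1, 64, 64]
```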
========================================================================================================================
SOURCE CODE FILE: omniglot.py
LINES: 1
SIZE: 4.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\omniglot.py
ENCODING: utf-8
```py
from os.path import join
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: Union[str, Path],
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Optional[Callable[[Union[str, Path]], Any]] = None,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
self.loader = loader
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L") if self.loader is None else self.loader(image_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
```
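A brief usage sketch, with ``./data`` as an assumed root directory.
```py
# Sketch: download=True fetches and unpacks images_background.zip under ./data/omniglot-py.
from torchvision.datasets import Omniglot

background = Omniglot(root="./data", background=True, download=True)
image, character_class = background[0]   # grayscale PIL image, int index into the character list
print(len(background), image.size, character_class)
```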
===============================================================================================================================
SOURCE CODE FILE: oxford_iiit_pet.py
LINES: 1
SIZE: 5.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\oxford_iiit_pet.py
ENCODING: utf-8
```py
import os
import os.path
import pathlib
from typing import Any, Callable, Optional, Sequence, Tuple, Union
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class OxfordIIITPet(VisionDataset):
"""`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:
- ``category`` (int): Label for one of the 37 pet categories.
- ``binary-category`` (int): Binary label for cat or dog.
- ``segmentation`` (PIL image): Segmentation trimap of the image.
If empty, ``None`` will be returned as target.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
transforms (callable, optional): A function/transform that takes input sample
and its target as entry and returns a transformed version.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again.
"""
_RESOURCES = (
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
)
_VALID_TARGET_TYPES = ("category", "binary-category", "segmentation")
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "trainval",
target_types: Union[Sequence[str], str] = "category",
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
):
self._split = verify_str_arg(split, "split", ("trainval", "test"))
if isinstance(target_types, str):
target_types = [target_types]
self._target_types = [
verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
]
super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
self._images_folder = self._base_folder / "images"
self._anns_folder = self._base_folder / "annotations"
self._segs_folder = self._anns_folder / "trimaps"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
image_ids = []
self._labels = []
self._bin_labels = []
with open(self._anns_folder / f"{self._split}.txt") as file:
for line in file:
image_id, label, bin_label, _ = line.strip().split()
image_ids.append(image_id)
self._labels.append(int(label) - 1)
self._bin_labels.append(int(bin_label) - 1)
self.bin_classes = ["Cat", "Dog"]
self.classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
self.bin_class_to_idx = dict(zip(self.bin_classes, range(len(self.bin_classes))))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]
def __len__(self) -> int:
return len(self._images)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image = Image.open(self._images[idx]).convert("RGB")
target: Any = []
for target_type in self._target_types:
if target_type == "category":
target.append(self._labels[idx])
elif target_type == "binary-category":
target.append(self._bin_labels[idx])
else: # target_type == "segmentation"
target.append(Image.open(self._segs[idx]))
if not target:
target = None
elif len(target) == 1:
target = target[0]
else:
target = tuple(target)
if self.transforms:
image, target = self.transforms(image, target)
return image, target
def _check_exists(self) -> bool:
for folder in (self._images_folder, self._anns_folder):
if not (os.path.exists(folder) and os.path.isdir(folder)):
return False
else:
return True
def _download(self) -> None:
if self._check_exists():
return
for url, md5 in self._RESOURCES:
download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
```
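A usage sketch showing multiple target types; ``./data`` is an assumed root, and the image and annotation archives are downloaded on first use.
```py
# Sketch: with two target types the target becomes a (label, trimap) tuple.
from torchvision.datasets import OxfordIIITPet

pets = OxfordIIITPet(
    root="./data",
    split="trainval",
    target_types=["category", "segmentation"],
    download=True,
)
image, (label, trimap) = pets[0]
print(pets.classes[label], image.size, trimap.size)
```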
====================================================================================================================
SOURCE CODE FILE: pcam.py
LINES: 1
SIZE: 5.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\pcam.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import _decompress, download_file_from_google_drive, verify_str_arg
from .vision import VisionDataset
class PCAM(VisionDataset):
"""`PCAM Dataset <https://github.com/basveeling/pcam>`_.
The PatchCamelyon dataset is a binary classification dataset with 327,680
color images (96px x 96px), extracted from histopathologic scans of lymph node
sections. Each image is annotated with a binary label indicating presence of
metastatic tissue.
This dataset requires the ``h5py`` package which you can install with ``pip install h5py``.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"test"`` or ``"val"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into ``root/pcam``. If
dataset is already downloaded, it is not downloaded again.
.. warning::
To download the dataset `gdown <https://github.com/wkentaro/gdown>`_ is required.
"""
_FILES = {
"train": {
"images": (
"camelyonpatch_level_2_split_train_x.h5", # Data file name
"1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2", # Google Drive ID
"1571f514728f59376b705fc836ff4b63", # md5 hash
),
"targets": (
"camelyonpatch_level_2_split_train_y.h5",
"1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
"35c2d7259d906cfc8143347bb8e05be7",
),
},
"test": {
"images": (
"camelyonpatch_level_2_split_test_x.h5",
"1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
"d8c2d60d490dbd479f8199bdfa0cf6ec",
),
"targets": (
"camelyonpatch_level_2_split_test_y.h5",
"17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
"60a7035772fbdb7f34eb86d4420cf66a",
),
},
"val": {
"images": (
"camelyonpatch_level_2_split_valid_x.h5",
"1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
"d5b63470df7cfa627aeec8b9dc0c066e",
),
"targets": (
"camelyonpatch_level_2_split_valid_y.h5",
"1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
"2b85f58b927af9964a4c15b8f7e8f179",
),
},
}
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
):
try:
import h5py
self.h5py = h5py
except ImportError:
raise RuntimeError(
"h5py is not found. This dataset needs to have h5py installed: please run pip install h5py"
)
self._split = verify_str_arg(split, "split", ("train", "test", "val"))
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "pcam"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
def __len__(self) -> int:
images_file = self._FILES[self._split]["images"][0]
with self.h5py.File(self._base_folder / images_file) as images_data:
return images_data["x"].shape[0]
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
images_file = self._FILES[self._split]["images"][0]
with self.h5py.File(self._base_folder / images_file) as images_data:
image = Image.fromarray(images_data["x"][idx]).convert("RGB")
targets_file = self._FILES[self._split]["targets"][0]
with self.h5py.File(self._base_folder / targets_file) as targets_data:
target = int(targets_data["y"][idx, 0, 0, 0]) # shape is [num_images, 1, 1, 1]
if self.transform:
image = self.transform(image)
if self.target_transform:
target = self.target_transform(target)
return image, target
def _check_exists(self) -> bool:
images_file = self._FILES[self._split]["images"][0]
targets_file = self._FILES[self._split]["targets"][0]
return all(self._base_folder.joinpath(h5_file).exists() for h5_file in (images_file, targets_file))
def _download(self) -> None:
if self._check_exists():
return
for file_name, file_id, md5 in self._FILES[self._split].values():
archive_name = file_name + ".gz"
download_file_from_google_drive(file_id, str(self._base_folder), filename=archive_name, md5=md5)
_decompress(str(self._base_folder / archive_name))
```
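A usage sketch; ``h5py`` must be installed (and ``gdown`` is needed for the Google Drive download), and ``./data`` is an assumed root.
```py
# Sketch: each access opens the split's HDF5 files lazily, as in __getitem__ above.
from torchvision.datasets import PCAM

val = PCAM(root="./data", split="val", download=True)
image, target = val[0]        # 96x96 RGB PIL image, binary label (0 or 1)
print(len(val), image.size, target)
```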
=========================================================================================================================
SOURCE CODE FILE: phototour.py
LINES: 1
SIZE: 7.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\phototour.py
ENCODING: utf-8
```py
import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from .utils import download_url
from .vision import VisionDataset
class PhotoTour(VisionDataset):
"""`Multi-view Stereo Correspondence <http://matthewalunbrown.com/patchdata/patchdata.html>`_ Dataset.
.. note::
We only provide the newer version of the dataset, since the authors state that it
is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the
patches are centred on real interest point detections, rather than being projections of 3D points as is the
case in the old dataset.
The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm.
Args:
root (str or ``pathlib.Path``): Root directory where images are.
name (string): Name of the dataset to load.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
urls = {
"notredame_harris": [
"http://matthewalunbrown.com/patchdata/notredame_harris.zip",
"notredame_harris.zip",
"69f8c90f78e171349abdf0307afefe4d",
],
"yosemite_harris": [
"http://matthewalunbrown.com/patchdata/yosemite_harris.zip",
"yosemite_harris.zip",
"a73253d1c6fbd3ba2613c45065c00d46",
],
"liberty_harris": [
"http://matthewalunbrown.com/patchdata/liberty_harris.zip",
"liberty_harris.zip",
"c731fcfb3abb4091110d0ae8c7ba182c",
],
"notredame": [
"http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip",
"notredame.zip",
"509eda8535847b8c0a90bbb210c83484",
],
"yosemite": ["http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip", "yosemite.zip", "533b2e8eb7ede31be40abc317b2fd4f0"],
"liberty": ["http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip", "liberty.zip", "fdd9152f138ea5ef2091746689176414"],
}
means = {
"notredame": 0.4854,
"yosemite": 0.4844,
"liberty": 0.4437,
"notredame_harris": 0.4854,
"yosemite_harris": 0.4844,
"liberty_harris": 0.4437,
}
stds = {
"notredame": 0.1864,
"yosemite": 0.1818,
"liberty": 0.2019,
"notredame_harris": 0.1864,
"yosemite_harris": 0.1818,
"liberty_harris": 0.2019,
}
lens = {
"notredame": 468159,
"yosemite": 633587,
"liberty": 450092,
"liberty_harris": 379587,
"yosemite_harris": 450912,
"notredame_harris": 325295,
}
image_ext = "bmp"
info_file = "info.txt"
matches_files = "m50_100000_100000_0.txt"
def __init__(
self,
root: Union[str, Path],
name: str,
train: bool = True,
transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform)
self.name = name
self.data_dir = os.path.join(self.root, name)
self.data_down = os.path.join(self.root, f"{name}.zip")
self.data_file = os.path.join(self.root, f"{name}.pt")
self.train = train
self.mean = self.means[name]
self.std = self.stds[name]
if download:
self.download()
if not self._check_datafile_exists():
self.cache()
# load the serialized data
self.data, self.labels, self.matches = torch.load(self.data_file, weights_only=True)
def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]:
"""
Args:
index (int): Index
Returns:
tuple: (data1, data2, matches)
"""
if self.train:
data = self.data[index]
if self.transform is not None:
data = self.transform(data)
return data
m = self.matches[index]
data1, data2 = self.data[m[0]], self.data[m[1]]
if self.transform is not None:
data1 = self.transform(data1)
data2 = self.transform(data2)
return data1, data2, m[2]
def __len__(self) -> int:
return len(self.data if self.train else self.matches)
def _check_datafile_exists(self) -> bool:
return os.path.exists(self.data_file)
def _check_downloaded(self) -> bool:
return os.path.exists(self.data_dir)
def download(self) -> None:
if self._check_datafile_exists():
return
if not self._check_downloaded():
# download files
url = self.urls[self.name][0]
filename = self.urls[self.name][1]
md5 = self.urls[self.name][2]
fpath = os.path.join(self.root, filename)
download_url(url, self.root, filename, md5)
import zipfile
with zipfile.ZipFile(fpath, "r") as z:
z.extractall(self.data_dir)
os.unlink(fpath)
def cache(self) -> None:
# process and save as torch files
dataset = (
read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
read_info_file(self.data_dir, self.info_file),
read_matches_files(self.data_dir, self.matches_files),
)
with open(self.data_file, "wb") as f:
torch.save(dataset, f)
def extra_repr(self) -> str:
split = "Train" if self.train is True else "Test"
return f"Split: {split}"
def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor:
"""Return a Tensor containing the patches"""
def PIL2array(_img: Image.Image) -> np.ndarray:
"""Convert PIL image type to numpy 2D array"""
return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
def find_files(_data_dir: str, _image_ext: str) -> List[str]:
"""Return a list with the file names of the images containing the patches"""
files = []
# find those files with the specified extension
for file_dir in os.listdir(_data_dir):
if file_dir.endswith(_image_ext):
files.append(os.path.join(_data_dir, file_dir))
return sorted(files) # sort files in ascend order to keep relations
patches = []
list_files = find_files(data_dir, image_ext)
for fpath in list_files:
img = Image.open(fpath)
for y in range(0, img.height, 64):
for x in range(0, img.width, 64):
patch = img.crop((x, y, x + 64, y + 64))
patches.append(PIL2array(patch))
return torch.ByteTensor(np.array(patches[:n]))
def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
"""Return a Tensor containing the list of labels
Read the file and keep only the ID of the 3D point.
"""
with open(os.path.join(data_dir, info_file)) as f:
labels = [int(line.split()[0]) for line in f]
return torch.LongTensor(labels)
def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor:
"""Return a Tensor containing the ground truth matches
Read the file and keep only 3D point ID.
Matches are represented with a 1, non matches with a 0.
"""
matches = []
with open(os.path.join(data_dir, matches_file)) as f:
for line in f:
line_split = line.split()
matches.append([int(line_split[0]), int(line_split[3]), int(line_split[1] == line_split[4])])
return torch.LongTensor(matches)
```
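A usage sketch for the two access modes; ``./data`` is an assumed root, and the first run downloads the archive and caches a ``<name>.pt`` file.
```py
# Sketch: train=True yields single patches, train=False yields (patch, patch, match) triplets.
from torchvision.datasets import PhotoTour

patches = PhotoTour(root="./data", name="notredame", train=True, download=True)
print(patches[0].shape)                 # torch.Size([64, 64]), a single uint8 patch

pairs = PhotoTour(root="./data", name="notredame", train=False, download=True)
p1, p2, is_match = pairs[0]             # two patches plus a 0/1 match label
print(p1.shape, p2.shape, int(is_match))
```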
=========================================================================================================================
SOURCE CODE FILE: places365.py
LINES: 2
SIZE: 7.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\places365.py
ENCODING: utf-8
```py
import os
from os import path
from pathlib import Path
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
from urllib.parse import urljoin
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Places365(VisionDataset):
r"""`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the Places365 dataset.
split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challenge``,
``val``, ``test``.
small (bool, optional): If ``True``, uses the small images, i.e. resized to 256 x 256 pixels, instead of the
high resolution ones.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
targets (list): The class_index value for each image in the dataset
Raises:
RuntimeError: If ``download is False`` and the meta files, i.e. the devkit, are not present or corrupted.
RuntimeError: If ``download is True`` and the image archive is already extracted.
"""
_SPLITS = ("train-standard", "train-challenge", "val", "test")
_BASE_URL = "http://data.csail.mit.edu/places/places365/"
# {variant: (archive, md5)}
_DEVKIT_META = {
"standard": ("filelist_places365-standard.tar", "35a0585fee1fa656440f3ab298f8479c"),
"challenge": ("filelist_places365-challenge.tar", "70a8307e459c3de41690a7c76c931734"),
}
# (file, md5)
_CATEGORIES_META = ("categories_places365.txt", "06c963b85866bd0649f97cb43dd16673")
# {split: (file, md5)}
_FILE_LIST_META = {
"train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a"),
"train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57"),
"val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1"),
"test": ("places365_test.txt", "2fce8233fe493576d724142e45d93653"),
}
# {(split, small): (file, md5)}
_IMAGES_META = {
("train-standard", False): ("train_large_places365standard.tar", "67e186b496a84c929568076ed01a8aa1"),
("train-challenge", False): ("train_large_places365challenge.tar", "605f18e68e510c82b958664ea134545f"),
("val", False): ("val_large.tar", "9b71c4993ad89d2d8bcbdc4aef38042f"),
("test", False): ("test_large.tar", "41a4b6b724b1d2cd862fb3871ed59913"),
("train-standard", True): ("train_256_places365standard.tar", "53ca1c756c3d1e7809517cc47c5561c5"),
("train-challenge", True): ("train_256_places365challenge.tar", "741915038a5e3471ec7332404dfb64ef"),
("val", True): ("val_256.tar", "e27b17d8d44f4af9a78502beb927f808"),
("test", True): ("test_256.tar", "f532f6ad7b582262a2ec8009075e186b"),
}
def __init__(
self,
root: Union[str, Path],
split: str = "train-standard",
small: bool = False,
download: bool = False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = self._verify_split(split)
self.small = small
self.loader = loader
self.classes, self.class_to_idx = self.load_categories(download)
self.imgs, self.targets = self.load_file_list(download)
if download:
self.download_images()
def __getitem__(self, index: int) -> Tuple[Any, Any]:
file, target = self.imgs[index]
image = self.loader(file)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.imgs)
@property
def variant(self) -> str:
return "challenge" if "challenge" in self.split else "standard"
@property
def images_dir(self) -> str:
size = "256" if self.small else "large"
if self.split.startswith("train"):
dir = f"data_{size}_{self.variant}"
else:
dir = f"{self.split}_{size}"
return path.join(self.root, dir)
def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]:
def process(line: str) -> Tuple[str, int]:
cls, idx = line.split()
return cls, int(idx)
file, md5 = self._CATEGORIES_META
file = path.join(self.root, file)
if not self._check_integrity(file, md5, download):
self.download_devkit()
with open(file) as fh:
class_to_idx = dict(process(line) for line in fh)
return sorted(class_to_idx.keys()), class_to_idx
def load_file_list(
self, download: bool = True
) -> Tuple[List[Tuple[str, Union[int, None]]], List[Union[int, None]]]:
def process(line: str, sep="/") -> Tuple[str, Union[int, None]]:
image, idx = (line.split() + [None])[:2]
image = cast(str, image)
idx = int(idx) if idx is not None else None
return path.join(self.images_dir, image.lstrip(sep).replace(sep, os.sep)), idx
file, md5 = self._FILE_LIST_META[self.split]
file = path.join(self.root, file)
if not self._check_integrity(file, md5, download):
self.download_devkit()
with open(file) as fh:
images = [process(line) for line in fh]
_, targets = zip(*images)
return images, list(targets)
def download_devkit(self) -> None:
file, md5 = self._DEVKIT_META[self.variant]
download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)
def download_images(self) -> None:
if path.exists(self.images_dir):
return
file, md5 = self._IMAGES_META[(self.split, self.small)]
download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)
if self.split.startswith("train"):
os.rename(self.images_dir.rsplit("_", 1)[0], self.images_dir)
def extra_repr(self) -> str:
return "\n".join(("Split: {split}", "Small: {small}")).format(**self.__dict__)
def _verify_split(self, split: str) -> str:
return verify_str_arg(split, "split", self._SPLITS)
def _check_integrity(self, file: str, md5: str, download: bool) -> bool:
integrity = check_integrity(file, md5=md5)
if not integrity and not download:
raise RuntimeError(
f"The file {file} does not exist or is corrupted. You can set download=True to download it."
)
return integrity
```
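A usage sketch; ``small=True`` selects the 256x256 archives, and ``./data`` is an assumed root.
```py
# Sketch: the devkit (category and file lists) is fetched first, then the image archive.
from torchvision.datasets import Places365

val = Places365(root="./data", split="val", small=True, download=True)
image, target = val[0]
print(val.classes[target], image.size)
```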
=============================================================================================================================
SOURCE CODE FILE: rendered_sst2.py
LINES: 1
SIZE: 3.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\rendered_sst2.py
ENCODING: utf-8
```py
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate a model's capability on optical
    character recognition. This dataset was generated by rendering sentences from the Stanford Sentiment
    Treebank v2 dataset.
    This dataset contains two classes (positive and negative) and is divided into three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), `"val"` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
self.loader = loader
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
```
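A minimal usage sketch, assuming a hypothetical ``./data`` root directory; ``download=True`` fetches and extracts ``rendered-sst2.tgz`` on first use.
```py
from torchvision import transforms
from torchvision.datasets import RenderedSST2

# download=True fetches rendered-sst2.tgz into ./data and extracts it once.
dataset = RenderedSST2(root="./data", split="val", download=True,
                       transform=transforms.ToTensor())
image, label = dataset[0]  # image tensor, label 0 (negative) or 1 (positive)
print(len(dataset), dataset.classes[label])
```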
|
=================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\samplers\__init__.py
ENCODING: utf-8
```py
from .clip_sampler import DistributedSampler, RandomClipSampler, UniformClipSampler
__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
```
|
=====================================================================================================================================
SOURCE CODE FILE: clip_sampler.py
LINES: 1
SIZE: 6.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\samplers\clip_sampler.py
ENCODING: utf-8
```py
import math
from typing import cast, Iterator, List, Optional, Sized, Union
import torch
import torch.distributed as dist
from torch.utils.data import Sampler
from torchvision.datasets.video_utils import VideoClips
class DistributedSampler(Sampler):
"""
Extension of DistributedSampler, as discussed in
https://github.com/pytorch/pytorch/issues/23430
Example:
dataset: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
num_replicas: 4
shuffle: False
when group_size = 1
RANK | shard_dataset
=========================
rank_0 | [0, 4, 8, 12]
rank_1 | [1, 5, 9, 13]
rank_2 | [2, 6, 10, 0]
rank_3 | [3, 7, 11, 1]
when group_size = 2
RANK | shard_dataset
=========================
rank_0 | [0, 1, 8, 9]
rank_1 | [2, 3, 10, 11]
rank_2 | [4, 5, 12, 13]
rank_3 | [6, 7, 0, 1]
"""
def __init__(
self,
dataset: Sized,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = False,
group_size: int = 1,
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
if len(dataset) % group_size != 0:
raise ValueError(
f"dataset length must be a multiplier of group size dataset length: {len(dataset)}, group size: {group_size}"
)
self.dataset = dataset
self.group_size = group_size
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
dataset_group_length = len(dataset) // group_size
self.num_group_samples = int(math.ceil(dataset_group_length * 1.0 / self.num_replicas))
self.num_samples = self.num_group_samples * group_size
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self) -> Iterator[int]:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices: Union[torch.Tensor, List[int]]
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
total_group_size = self.total_size // self.group_size
indices = torch.reshape(torch.LongTensor(indices), (total_group_size, self.group_size))
# subsample
indices = indices[self.rank : total_group_size : self.num_replicas, :]
indices = torch.reshape(indices, (-1,)).tolist()
assert len(indices) == self.num_samples
if isinstance(self.dataset, Sampler):
orig_indices = list(iter(self.dataset))
indices = [orig_indices[i] for i in indices]
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
self.epoch = epoch
class UniformClipSampler(Sampler):
"""
Sample `num_clips_per_video` clips for each video, equally spaced.
When the number of unique clips in a video is fewer than `num_clips_per_video`,
the clips are repeated until `num_clips_per_video` clips are collected.
Args:
video_clips (VideoClips): video clips to sample from
num_clips_per_video (int): number of clips to be sampled per video
"""
def __init__(self, video_clips: VideoClips, num_clips_per_video: int) -> None:
if not isinstance(video_clips, VideoClips):
raise TypeError(f"Expected video_clips to be an instance of VideoClips, got {type(video_clips)}")
self.video_clips = video_clips
self.num_clips_per_video = num_clips_per_video
def __iter__(self) -> Iterator[int]:
idxs = []
s = 0
# select num_clips_per_video for each video, uniformly spaced
for c in self.video_clips.clips:
length = len(c)
if length == 0:
# corner case where video decoding fails
continue
sampled = torch.linspace(s, s + length - 1, steps=self.num_clips_per_video).floor().to(torch.int64)
s += length
idxs.append(sampled)
return iter(cast(List[int], torch.cat(idxs).tolist()))
def __len__(self) -> int:
return sum(self.num_clips_per_video for c in self.video_clips.clips if len(c) > 0)
class RandomClipSampler(Sampler):
"""
Samples at most `max_clips_per_video` clips for each video, at random.
Args:
video_clips (VideoClips): video clips to sample from
max_clips_per_video (int): maximum number of clips to be sampled per video
"""
def __init__(self, video_clips: VideoClips, max_clips_per_video: int) -> None:
if not isinstance(video_clips, VideoClips):
raise TypeError(f"Expected video_clips to be an instance of VideoClips, got {type(video_clips)}")
self.video_clips = video_clips
self.max_clips_per_video = max_clips_per_video
def __iter__(self) -> Iterator[int]:
idxs = []
s = 0
# select at most max_clips_per_video for each video, randomly
for c in self.video_clips.clips:
length = len(c)
size = min(length, self.max_clips_per_video)
sampled = torch.randperm(length)[:size] + s
s += length
idxs.append(sampled)
idxs_ = torch.cat(idxs)
# shuffle all clips randomly
perm = torch.randperm(len(idxs_))
return iter(idxs_[perm].tolist())
def __len__(self) -> int:
return sum(min(len(c), self.max_clips_per_video) for c in self.video_clips.clips)
```
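A minimal sketch of the group-wise sharding described in the ``DistributedSampler`` docstring above, assuming a plain Python list as the sized dataset; passing ``num_replicas`` and ``rank`` explicitly avoids the need for an initialized process group.
```py
from torchvision.datasets.samplers import DistributedSampler

data = list(range(14))  # the 14-element dataset from the docstring example
for rank in range(4):
    sampler = DistributedSampler(data, num_replicas=4, rank=rank, shuffle=False, group_size=2)
    # rank 0 -> [0, 1, 8, 9], rank 1 -> [2, 3, 10, 11], rank 2 -> [4, 5, 12, 13], rank 3 -> [6, 7, 0, 1]
    print(rank, list(sampler))
```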
|
===================================================================================================================
SOURCE CODE FILE: sbd.py
LINES: 3
SIZE: 5.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\sbd.py
ENCODING: utf-8
```py
import os
import shutil
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class SBDataset(VisionDataset):
"""`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_
The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.
.. note ::
Please note that the train and val splits included with this dataset are different from
the splits in the PASCAL VOC dataset. In particular, some "train" images might be part of
VOC2012 val.
If you are interested in testing on VOC2012 val, then use ``image_set='train_noval'``,
which excludes all val images.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of the Semantic Boundaries Dataset
image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``.
Image set ``train_noval`` excludes VOC 2012 val images.
mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'.
In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`,
where `num_classes=20`.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version. Input sample is PIL image and target is a numpy array
if `mode='boundaries'` or PIL image if `mode='segmentation'`.
"""
url = "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"
md5 = "82b4d87ceb2ed10f6038a1cba92111cb"
filename = "benchmark.tgz"
voc_train_url = "https://www.cs.cornell.edu/~bharathh/train_noval.txt"
voc_split_filename = "train_noval.txt"
voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722"
def __init__(
self,
root: Union[str, Path],
image_set: str = "train",
mode: str = "boundaries",
download: bool = False,
transforms: Optional[Callable] = None,
) -> None:
try:
from scipy.io import loadmat
self._loadmat = loadmat
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transforms)
self.image_set = verify_str_arg(image_set, "image_set", ("train", "val", "train_noval"))
self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries"))
self.num_classes = 20
sbd_root = self.root
image_dir = os.path.join(sbd_root, "img")
mask_dir = os.path.join(sbd_root, "cls")
if download:
download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5)
extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset")
for f in ["cls", "img", "inst", "train.txt", "val.txt"]:
old_path = os.path.join(extracted_ds_root, f)
shutil.move(old_path, sbd_root)
if self.image_set == "train_noval":
# Note: this is failing as of June 2024 https://github.com/pytorch/vision/issues/8471
download_url(self.voc_train_url, sbd_root, self.voc_split_filename, self.voc_split_md5)
if not os.path.isdir(sbd_root):
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
split_f = os.path.join(sbd_root, image_set.rstrip("\n") + ".txt")
with open(os.path.join(split_f)) as fh:
file_names = [x.strip() for x in fh.readlines()]
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names]
self._get_target = self._get_segmentation_target if self.mode == "segmentation" else self._get_boundaries_target
def _get_segmentation_target(self, filepath: str) -> Image.Image:
mat = self._loadmat(filepath)
return Image.fromarray(mat["GTcls"][0]["Segmentation"][0])
def _get_boundaries_target(self, filepath: str) -> np.ndarray:
mat = self._loadmat(filepath)
return np.concatenate(
[np.expand_dims(mat["GTcls"][0]["Boundaries"][0][i][0].toarray(), axis=0) for i in range(self.num_classes)],
axis=0,
)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
img = Image.open(self.images[index]).convert("RGB")
target = self._get_target(self.masks[index])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self) -> int:
return len(self.images)
def extra_repr(self) -> str:
lines = ["Image set: {image_set}", "Mode: {mode}"]
return "\n".join(lines).format(**self.__dict__)
```
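A minimal usage sketch, assuming a hypothetical ``./data/sbd`` root and scipy installed; note that the post-download move step is not idempotent, so ``download=True`` is typically only used on the first run.
```py
from torchvision.datasets import SBDataset

# First run: downloads benchmark.tgz and rearranges img/, cls/, train.txt, val.txt under root.
sbd = SBDataset(root="./data/sbd", image_set="train", mode="segmentation", download=True)
img, target = sbd[0]  # PIL image and PIL segmentation mask (a [20, H, W] array with mode="boundaries")
print(len(sbd), img.size)
```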
|
===================================================================================================================
SOURCE CODE FILE: sbu.py
LINES: 1
SIZE: 4.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\sbu.py
ENCODING: utf-8
```py
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor (depending on the given loader)
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.loader = loader
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = self.loader(filename)
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
```
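A minimal usage sketch, assuming a hypothetical ``./data/sbu`` root; ``download=True`` (the default) first fetches the caption tarball and then tries to download each referenced Flickr photo individually, silently skipping images that have since been removed.
```py
from torchvision.datasets import SBU

sbu = SBU(root="./data/sbu", download=True)  # can take a long time: one HTTP request per photo
img, caption = sbu[0]                        # PIL image and its caption string
print(len(sbu), caption)
```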
|
=======================================================================================================================
SOURCE CODE FILE: semeion.py
LINES: 1
SIZE: 3.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\semeion.py
ENCODING: utf-8
```py
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SEMEION(VisionDataset):
r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where the
``semeion.data`` file exists or will be downloaded to.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
# convert value to 8 bit unsigned integer
# color (white #255) the pixels
self.data = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
```
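A minimal usage sketch, assuming a hypothetical ``./data/semeion`` root; the whole dataset is a single small text file, so ``download=True`` is cheap.
```py
from torchvision.datasets import SEMEION

semeion = SEMEION(root="./data/semeion", download=True)
img, target = semeion[0]  # 16x16 grayscale PIL image, digit label in [0, 9]
print(len(semeion), img.size, target)
```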
|
=============================================================================================================================
SOURCE CODE FILE: stanford_cars.py
LINES: 1
SIZE: 4.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\stanford_cars.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""Stanford Cars Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
is split roughly 50-50.
The original URL was https://ai.stanford.edu/~jkrause/cars/car_dataset.html, but
the dataset is no longer available online.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor (depending on the given loader)
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): This parameter exists for backward compatibility but it does not
download the dataset, since the original URL is not available anymore.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found.")
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
self.loader = loader
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
image = self.loader(image_path)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
def download(self):
raise ValueError("The original URL is broken so the StanfordCars dataset cannot be downloaded anymore.")
```
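A minimal usage sketch, assuming the files have been obtained elsewhere and placed manually under a hypothetical ``./data`` root, since ``download=True`` raises.
```py
from torchvision.datasets import StanfordCars

# Expected layout (see _check_exists):
#   ./data/stanford_cars/devkit/cars_train_annos.mat and cars_meta.mat
#   ./data/stanford_cars/cars_train/*.jpg
#   ./data/stanford_cars/cars_test_annos_withlabels.mat and cars_test/*.jpg for split="test"
cars = StanfordCars(root="./data", split="train", download=False)
image, target = cars[0]  # PIL image and 0-based class index
print(len(cars), cars.classes[target])
```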
|
=====================================================================================================================
SOURCE CODE FILE: stl10.py
LINES: 1
SIZE: 7.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\stl10.py
ENCODING: utf-8
```py
import os.path
from pathlib import Path
from typing import Any, Callable, cast, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class STL10(VisionDataset):
"""`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``stl10_binary`` exists.
split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
The corresponding subset of the dataset is loaded.
folds (int, optional): One of {0-9} or None.
For training, loads one of the 10 pre-defined folds of 1k samples for the
standard evaluation procedure. If no value is passed, loads the 5k samples.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = "stl10_binary"
url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
filename = "stl10_binary.tar.gz"
tgz_md5 = "91f7769df0f17e558f3565bffb0c7dfb"
class_names_file = "class_names.txt"
folds_list_file = "fold_indices.txt"
train_list = [
["train_X.bin", "918c2871b30a85fa023e0c44e0bee87f"],
["train_y.bin", "5a34089d4802c674881badbb80307741"],
["unlabeled_X.bin", "5242ba1fed5e4be9e1e742405eb56ca4"],
]
test_list = [["test_X.bin", "7f263ba9f9e0b06b93213547f721ac82"], ["test_y.bin", "36f9794fa4beb8a2c72628de14fa638e"]]
splits = ("train", "train+unlabeled", "unlabeled", "test")
def __init__(
self,
root: Union[str, Path],
split: str = "train",
folds: Optional[int] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", self.splits)
self.folds = self._verify_folds(folds)
if download:
self.download()
elif not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# now load the picked numpy arrays
self.labels: Optional[np.ndarray]
if self.split == "train":
self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
self.labels = cast(np.ndarray, self.labels)
self.__load_folds(folds)
elif self.split == "train+unlabeled":
self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
self.labels = cast(np.ndarray, self.labels)
self.__load_folds(folds)
unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate((self.labels, np.asarray([-1] * unlabeled_data.shape[0])))
elif self.split == "unlabeled":
self.data, _ = self.__loadfile(self.train_list[2][0])
self.labels = np.asarray([-1] * self.data.shape[0])
else: # self.split == 'test':
self.data, self.labels = self.__loadfile(self.test_list[0][0], self.test_list[1][0])
class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
if os.path.isfile(class_file):
with open(class_file) as f:
self.classes = f.read().splitlines()
def _verify_folds(self, folds: Optional[int]) -> Optional[int]:
if folds is None:
return folds
elif isinstance(folds, int):
if folds in range(10):
return folds
msg = "Value for argument folds should be in the range [0, 10), but got {}."
raise ValueError(msg.format(folds))
else:
msg = "Expected type None or int for argument folds, but got type {}."
raise ValueError(msg.format(type(folds)))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
target: Optional[int]
if self.labels is not None:
img, target = self.data[index], int(self.labels[index])
else:
img, target = self.data[index], None
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return self.data.shape[0]
def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
labels = None
if labels_file:
path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
with open(path_to_labels, "rb") as f:
labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based
path_to_data = os.path.join(self.root, self.base_folder, data_file)
with open(path_to_data, "rb") as f:
# read whole file in uint8 chunks
everything = np.fromfile(f, dtype=np.uint8)
images = np.reshape(everything, (-1, 3, 96, 96))
images = np.transpose(images, (0, 1, 3, 2))
return images, labels
def _check_integrity(self) -> bool:
for filename, md5 in self.train_list + self.test_list:
fpath = os.path.join(self.root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
self._check_integrity()
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
def __load_folds(self, folds: Optional[int]) -> None:
# loads one of the folds if specified
if folds is None:
return
path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file)
with open(path_to_folds) as f:
str_idx = f.read().splitlines()[folds]
list_idx = np.fromstring(str_idx, dtype=np.int64, sep=" ")
self.data = self.data[list_idx, :, :, :]
if self.labels is not None:
self.labels = self.labels[list_idx]
```
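A minimal usage sketch, assuming a hypothetical ``./data`` root; ``folds=None`` loads all 5k labelled training images, while ``folds=0``..``9`` selects one of the predefined 1k-image folds.
```py
from torchvision.datasets import STL10

stl_fold0 = STL10(root="./data", split="train", folds=0, download=True)  # 1,000 labelled images
stl_unlab = STL10(root="./data", split="unlabeled", download=True)       # labels are all -1
img, target = stl_fold0[0]  # 96x96 RGB PIL image, class index in [0, 9]
print(len(stl_fold0), len(stl_unlab), target)
```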
|
======================================================================================================================
SOURCE CODE FILE: sun397.py
LINES: 1
SIZE: 3.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\sun397.py
ENCODING: utf-8
```py
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
SUN397, or Scene UNderstanding (SUN), is a dataset for scene recognition consisting of
397 categories with 108,754 images.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor (depending on the given loader)
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, Path]], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
```
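A minimal usage sketch, assuming a hypothetical ``./data`` root; the SUN397 archive is very large, so ``download=True`` is typically a one-off.
```py
from torchvision.datasets import SUN397

sun = SUN397(root="./data", download=True)
image, label = sun[0]  # PIL image and an index into sun.classes
print(len(sun), sun.classes[label])
```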
|
====================================================================================================================
SOURCE CODE FILE: svhn.py
LINES: 1
SIZE: 4.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\svhn.py
ENCODING: utf-8
```py
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this dataset,
we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions, which
expect the class labels to be in the range `[0, C-1]`.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
Accordingly, the dataset split is selected. 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
```
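A minimal usage sketch, assuming a hypothetical ``./data/svhn`` root; labels are already remapped so that the digit 0 maps to class 0.
```py
from torchvision.datasets import SVHN

svhn_train = SVHN(root="./data/svhn", split="train", download=True)
svhn_extra = SVHN(root="./data/svhn", split="extra", download=True)  # additional training images
img, target = svhn_train[0]  # 32x32 RGB PIL image, label in [0, 9]
print(len(svhn_train), len(svhn_extra), target)
```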
|
======================================================================================================================
SOURCE CODE FILE: ucf101.py
LINES: 1
SIZE: 5.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\ucf101.py
ENCODING: utf-8
```py
import os
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch import Tensor
from .folder import find_classes, make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class UCF101(VisionDataset):
"""
`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
UCF101 is an action recognition video dataset.
This dataset considers every video as a collection of video clips of fixed size, specified
by ``frames_per_clip``, where the step in frames between each clip is given by
``step_between_clips``. The dataset itself can be downloaded from the dataset website; the
annotations that ``annotation_path`` should point to can be downloaded from `here
<https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`_.
To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
elements will come from video 1, and the next three elements from video 2.
Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
frames in a video might be present.
Internally, it uses a VideoClips object to handle clip creation.
Args:
root (str or ``pathlib.Path``): Root directory of the UCF101 Dataset.
annotation_path (str): path to the folder containing the split files;
see docstring above for download instructions of these files
frames_per_clip (int): number of frames in a clip.
step_between_clips (int, optional): number of frames between each clip.
fold (int, optional): which fold to use. Should be between 1 and 3.
train (bool, optional): if ``True``, creates a dataset from the train split,
otherwise from the test split.
transform (callable, optional): A function/transform that takes in a TxHxWxC video
and returns a transformed version.
output_format (str, optional): The format of the output video tensors (before transforms).
Can be either "THWC" (default) or "TCHW".
Returns:
tuple: A 3-tuple with the following entries:
- video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
- audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
and `L` is the number of points
- label (int): class of the video clip
"""
def __init__(
self,
root: Union[str, Path],
annotation_path: str,
frames_per_clip: int,
step_between_clips: int = 1,
frame_rate: Optional[int] = None,
fold: int = 1,
train: bool = True,
transform: Optional[Callable] = None,
_precomputed_metadata: Optional[Dict[str, Any]] = None,
num_workers: int = 1,
_video_width: int = 0,
_video_height: int = 0,
_video_min_dimension: int = 0,
_audio_samples: int = 0,
output_format: str = "THWC",
) -> None:
super().__init__(root)
if not 1 <= fold <= 3:
raise ValueError(f"fold should be between 1 and 3, got {fold}")
extensions = ("avi",)
self.fold = fold
self.train = train
self.classes, class_to_idx = find_classes(self.root)
self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
video_list = [x[0] for x in self.samples]
video_clips = VideoClips(
video_list,
frames_per_clip,
step_between_clips,
frame_rate,
_precomputed_metadata,
num_workers=num_workers,
_video_width=_video_width,
_video_height=_video_height,
_video_min_dimension=_video_min_dimension,
_audio_samples=_audio_samples,
output_format=output_format,
)
# we bookkeep the full version of video clips because we want to be able
# to return the metadata of full version rather than the subset version of
# video clips
self.full_video_clips = video_clips
self.indices = self._select_fold(video_list, annotation_path, fold, train)
self.video_clips = video_clips.subset(self.indices)
self.transform = transform
@property
def metadata(self) -> Dict[str, Any]:
return self.full_video_clips.metadata
def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:
name = "train" if train else "test"
name = f"{name}list{fold:02d}.txt"
f = os.path.join(annotation_path, name)
selected_files = set()
with open(f) as fid:
data = fid.readlines()
data = [x.strip().split(" ")[0] for x in data]
data = [os.path.join(self.root, *x.split("/")) for x in data]
selected_files.update(data)
indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
return indices
def __len__(self) -> int:
return self.video_clips.num_clips()
def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
video, audio, info, video_idx = self.video_clips.get_clip(idx)
label = self.samples[self.indices[video_idx]][1]
if self.transform is not None:
video = self.transform(video)
return video, audio, label
```
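A minimal usage sketch, assuming the UCF101 ``.avi`` files are extracted under a hypothetical ``./data/UCF-101`` directory and the official split files (``trainlist01.txt`` etc.) under ``./data/ucfTrainTestlist``; the first instantiation is slow because ``VideoClips`` has to probe every video.
```py
from torchvision.datasets import UCF101

ucf = UCF101(
    root="./data/UCF-101",
    annotation_path="./data/ucfTrainTestlist",
    frames_per_clip=16,
    step_between_clips=16,
    fold=1,
    train=True,
    output_format="TCHW",
)
video, audio, label = ucf[0]  # video: Tensor[T, C, H, W], label: class index
print(len(ucf), video.shape, label)
```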
|
====================================================================================================================
SOURCE CODE FILE: usps.py
LINES: 2
SIZE: 3.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\usps.py
ENCODING: utf-8
```py
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import download_url
from .vision import VisionDataset
class USPS(VisionDataset):
"""`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.
The data format is: [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``.
The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
and make pixel values in ``[0, 255]``.
Args:
root (str or ``pathlib.Path``): Root directory of dataset to store ``USPS`` data files.
train (bool, optional): If True, creates dataset from ``usps.bz2``,
otherwise from ``usps.t.bz2``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
"usps.bz2",
"ec16c51db3855ca6c91edd34d0e9b197",
],
"test": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
"usps.t.bz2",
"8ea070ee2aca1ac39742fdd1ef5ed118",
],
}
def __init__(
self,
root: Union[str, Path],
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
split = "train" if train else "test"
url, filename, checksum = self.split_list[split]
full_path = os.path.join(self.root, filename)
if download and not os.path.exists(full_path):
download_url(url, self.root, filename, md5=checksum)
import bz2
with bz2.open(full_path) as fp:
raw_data = [line.decode().split() for line in fp.readlines()]
tmp_list = [[x.split(":")[-1] for x in data[1:]] for data in raw_data]
imgs = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16))
imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
targets = [int(d[0]) - 1 for d in raw_data]
self.data = imgs
self.targets = targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
```
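A minimal usage sketch, assuming a hypothetical ``./data/usps`` root; ``train=True`` reads ``usps.bz2`` and ``train=False`` reads ``usps.t.bz2``.
```py
from torchvision.datasets import USPS

usps_train = USPS(root="./data/usps", train=True, download=True)
usps_test = USPS(root="./data/usps", train=False, download=True)
img, target = usps_train[0]  # 16x16 grayscale PIL image, label in [0, 9]
print(len(usps_train), len(usps_test), target)
```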
|
=====================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 2
SIZE: 16.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\utils.py
ENCODING: utf-8
```py
import bz2
import gzip
import hashlib
import lzma
import os
import os.path
import pathlib
import re
import sys
import tarfile
import urllib
import urllib.error
import urllib.request
import zipfile
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Tuple, TypeVar, Union
from urllib.parse import urlparse
import numpy as np
import torch
from torch.utils.model_zoo import tqdm
from .._internally_replaced_utils import _download_file_from_remote_location, _is_remote_location_available
USER_AGENT = "pytorch/vision"
def _urlretrieve(url: str, filename: Union[str, pathlib.Path], chunk_size: int = 1024 * 32) -> None:
with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
with open(filename, "wb") as fh, tqdm(total=response.length, unit="B", unit_scale=True) as pbar:
while chunk := response.read(chunk_size):
fh.write(chunk)
pbar.update(len(chunk))
def calculate_md5(fpath: Union[str, pathlib.Path], chunk_size: int = 1024 * 1024) -> str:
# Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
# not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
# it torchvision.datasets is unusable in these environments since we perform a MD5 check everywhere.
if sys.version_info >= (3, 9):
md5 = hashlib.md5(usedforsecurity=False)
else:
md5 = hashlib.md5()
with open(fpath, "rb") as f:
while chunk := f.read(chunk_size):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath: Union[str, pathlib.Path], md5: str, **kwargs: Any) -> bool:
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath: Union[str, pathlib.Path], md5: Optional[str] = None) -> bool:
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def _get_redirect_url(url: str, max_hops: int = 3) -> str:
initial_url = url
headers = {"Method": "HEAD", "User-Agent": USER_AGENT}
for _ in range(max_hops + 1):
with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response:
if response.url == url or response.url is None:
return url
url = response.url
else:
raise RecursionError(
f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
)
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def download_url(
url: str,
root: Union[str, pathlib.Path],
filename: Optional[Union[str, pathlib.Path]] = None,
md5: Optional[str] = None,
max_redirect_hops: int = 3,
) -> None:
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
max_redirect_hops (int, optional): Maximum number of redirect hops allowed
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.fspath(os.path.join(root, filename))
os.makedirs(root, exist_ok=True)
# check if file is already present locally
if check_integrity(fpath, md5):
return
if _is_remote_location_available():
_download_file_from_remote_location(fpath, url)
else:
# expand redirect chain if needed
url = _get_redirect_url(url, max_hops=max_redirect_hops)
# check if file is located on Google Drive
file_id = _get_google_drive_file_id(url)
if file_id is not None:
return download_file_from_google_drive(file_id, root, filename, md5)
# download the file
try:
_urlretrieve(url, fpath)
except (urllib.error.URLError, OSError) as e: # type: ignore[attr-defined]
if url[:5] == "https":
url = url.replace("https:", "http:")
_urlretrieve(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def list_dir(root: Union[str, pathlib.Path], prefix: bool = False) -> List[str]:
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root: Union[str, pathlib.Path], suffix: str, prefix: bool = False) -> List[str]:
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(
file_id: str,
root: Union[str, pathlib.Path],
filename: Optional[Union[str, pathlib.Path]] = None,
md5: Optional[str] = None,
):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
try:
import gdown
except ModuleNotFoundError:
raise RuntimeError(
"To download files from GDrive, 'gdown' is required. You can install it with 'pip install gdown'."
)
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.fspath(os.path.join(root, filename))
os.makedirs(root, exist_ok=True)
if check_integrity(fpath, md5):
return
gdown.download(id=file_id, output=fpath, quiet=False, user_agent=USER_AGENT)
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def _extract_tar(
from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
) -> None:
with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
tar.extractall(to_path)
_ZIP_COMPRESSION_MAP: Dict[str, int] = {
".bz2": zipfile.ZIP_BZIP2,
".xz": zipfile.ZIP_LZMA,
}
def _extract_zip(
from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
) -> None:
with zipfile.ZipFile(
from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
) as zip:
zip.extractall(to_path)
_ARCHIVE_EXTRACTORS: Dict[str, Callable[[Union[str, pathlib.Path], Union[str, pathlib.Path], Optional[str]], None]] = {
".tar": _extract_tar,
".zip": _extract_zip,
}
_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
".bz2": bz2.open,
".gz": gzip.open,
".xz": lzma.open,
}
_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
".tbz": (".tar", ".bz2"),
".tbz2": (".tar", ".bz2"),
".tgz": (".tar", ".gz"),
}
def _detect_file_type(file: Union[str, pathlib.Path]) -> Tuple[str, Optional[str], Optional[str]]:
"""Detect the archive type and/or compression of a file.
Args:
file (str): the filename
Returns:
(tuple): tuple of suffix, archive type, and compression
Raises:
RuntimeError: if file has no suffix or suffix is not supported
"""
suffixes = pathlib.Path(file).suffixes
if not suffixes:
raise RuntimeError(
f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
)
suffix = suffixes[-1]
# check if the suffix is a known alias
if suffix in _FILE_TYPE_ALIASES:
return (suffix, *_FILE_TYPE_ALIASES[suffix])
# check if the suffix is an archive type
if suffix in _ARCHIVE_EXTRACTORS:
return suffix, suffix, None
# check if the suffix is a compression
if suffix in _COMPRESSED_FILE_OPENERS:
# check for suffix hierarchy
if len(suffixes) > 1:
suffix2 = suffixes[-2]
# check if the suffix2 is an archive type
if suffix2 in _ARCHIVE_EXTRACTORS:
return suffix2 + suffix, suffix2, suffix
return suffix, None, suffix
valid_suffixes = sorted(set(_FILE_TYPE_ALIASES) | set(_ARCHIVE_EXTRACTORS) | set(_COMPRESSED_FILE_OPENERS))
raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")
def _decompress(
from_path: Union[str, pathlib.Path],
to_path: Optional[Union[str, pathlib.Path]] = None,
remove_finished: bool = False,
) -> pathlib.Path:
r"""Decompress a file.
The compression is automatically detected from the file name.
Args:
from_path (str): Path to the file to be decompressed.
to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
remove_finished (bool): If ``True``, remove the file after the extraction.
Returns:
(str): Path to the decompressed file.
"""
suffix, archive_type, compression = _detect_file_type(from_path)
if not compression:
raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")
if to_path is None:
to_path = pathlib.Path(os.fspath(from_path).replace(suffix, archive_type if archive_type is not None else ""))
# We don't need to check for a missing key here, since this was already done in _detect_file_type()
compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]
with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
wfh.write(rfh.read())
if remove_finished:
os.remove(from_path)
return pathlib.Path(to_path)
def extract_archive(
from_path: Union[str, pathlib.Path],
to_path: Optional[Union[str, pathlib.Path]] = None,
remove_finished: bool = False,
) -> Union[str, pathlib.Path]:
"""Extract an archive.
The archive type and a possible compression is automatically detected from the file name. If the file is compressed
but not an archive, the call is dispatched to :func:`_decompress`.
Args:
from_path (str): Path to the file to be extracted.
to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
used.
remove_finished (bool): If ``True``, remove the file after the extraction.
Returns:
(str): Path to the directory the file was extracted to.
"""
def path_or_str(ret_path: pathlib.Path) -> Union[str, pathlib.Path]:
if isinstance(from_path, str):
return os.fspath(ret_path)
else:
return ret_path
if to_path is None:
to_path = os.path.dirname(from_path)
suffix, archive_type, compression = _detect_file_type(from_path)
if not archive_type:
ret_path = _decompress(
from_path,
os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
remove_finished=remove_finished,
)
return path_or_str(ret_path)
# We don't need to check for a missing key here, since this was already done in _detect_file_type()
extractor = _ARCHIVE_EXTRACTORS[archive_type]
extractor(from_path, to_path, compression)
if remove_finished:
os.remove(from_path)
return path_or_str(pathlib.Path(to_path))
def download_and_extract_archive(
url: str,
download_root: Union[str, pathlib.Path],
extract_root: Optional[Union[str, pathlib.Path]] = None,
filename: Optional[Union[str, pathlib.Path]] = None,
md5: Optional[str] = None,
remove_finished: bool = False,
) -> None:
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable: Iterable) -> str:
return "'" + "', '".join([str(item) for item in iterable]) + "'"
T = TypeVar("T", str, bytes)
def verify_str_arg(
value: T,
arg: Optional[str] = None,
valid_values: Optional[Iterable[T]] = None,
custom_msg: Optional[str] = None,
) -> T:
if not isinstance(value, str):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}."
msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def _read_pfm(file_name: Union[str, pathlib.Path], slice_channels: int = 2) -> np.ndarray:
"""Read file in .pfm format. Might contain either 1 or 3 channels of data.
Args:
file_name (str): Path to the file.
slice_channels (int): Number of channels to slice out of the file.
Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header not in [b"PF", b"Pf"]:
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
pfm_channels = 3 if header == b"PF" else 1
data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
data = data[:slice_channels, :, :]
return data.astype(np.float32)
def _flip_byte_order(t: torch.Tensor) -> torch.Tensor:
return (
t.contiguous().view(torch.uint8).view(*t.shape, t.element_size()).flip(-1).view(*t.shape[:-1], -1).view(t.dtype)
)
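# Illustrative sketch (not part of torchvision): _flip_byte_order swaps the endianness of
# each element, so on a little-endian host (the common case) int16 0x0001 becomes 0x0100.
def _example_flip_byte_order_usage() -> None:
    t = torch.tensor([1, 256], dtype=torch.int16)
    # 1 == 0x0001 -> 0x0100 == 256, and 256 -> 1 (assumes a little-endian host).
    assert _flip_byte_order(t).tolist() == [256, 1]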
```
|
===========================================================================================================================
SOURCE CODE FILE: video_utils.py
LINES: 1
SIZE: 17.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\video_utils.py
ENCODING: utf-8
```py
import bisect
import math
import warnings
from fractions import Fraction
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, TypeVar, Union
import torch
from torchvision.io import _probe_video_from_file, _read_video_from_file, read_video, read_video_timestamps
from .utils import tqdm
T = TypeVar("T")
def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int:
"""convert pts between different time bases
Args:
pts: presentation timestamp, float
timebase_from: original timebase. Fraction
timebase_to: new timebase. Fraction
round_func: rounding function.
"""
new_pts = Fraction(pts, 1) * timebase_from / timebase_to
return round_func(new_pts)
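# Illustrative sketch (not part of torchvision): converting a pts from a 1/30000 video
# timebase to a 1/44100 audio timebase, rounding down.
def _example_pts_convert_usage() -> None:
    video_timebase = Fraction(1, 30000)
    audio_timebase = Fraction(1, 44100)
    # pts 1001 is ~0.0334 s in the video timebase, which lands on audio pts 1471.
    assert pts_convert(1001, video_timebase, audio_timebase, math.floor) == 1471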
def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor:
"""
Similar to ``tensor.unfold``, but with support for ``dilation``,
and specialized for 1-d tensors.
Returns all consecutive windows of `size` elements, with
`step` between window starts. The distance between consecutive elements
in a window is given by `dilation`.
"""
if tensor.dim() != 1:
raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}")
o_stride = tensor.stride(0)
numel = tensor.numel()
new_stride = (step * o_stride, dilation * o_stride)
new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size)
if new_size[0] < 1:
new_size = (0, size)
return torch.as_strided(tensor, new_size, new_stride)
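# Illustrative sketch (not part of torchvision): windows produced by unfold for a small
# 1-d tensor, with and without dilation.
def _example_unfold_usage() -> None:
    t = torch.arange(6)
    # step=2 moves the window start by 2 elements each time.
    assert unfold(t, size=3, step=2).tolist() == [[0, 1, 2], [2, 3, 4]]
    # dilation=2 takes every second element inside each window.
    assert unfold(t, size=3, step=1, dilation=2).tolist() == [[0, 2, 4], [1, 3, 5]]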
class _VideoTimestampsDataset:
"""
Dataset used to parallelize the reading of the timestamps
of a list of videos, given their paths in the filesystem.
Used in VideoClips and defined at top level, so it can be
pickled when forking.
"""
def __init__(self, video_paths: List[str]) -> None:
self.video_paths = video_paths
def __len__(self) -> int:
return len(self.video_paths)
def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]:
return read_video_timestamps(self.video_paths[idx])
def _collate_fn(x: T) -> T:
"""
Dummy collate function to be used with _VideoTimestampsDataset
"""
return x
class VideoClips:
"""
Given a list of video files, computes all consecutive subvideos of size
`clip_length_in_frames`, where the distance between each subvideo in the
same video is defined by `frames_between_clips`.
If `frame_rate` is specified, it will also resample all the videos to have
the same frame rate, and the clips will refer to this frame rate.
Creating this instance the first time is time-consuming, as it needs to
decode all the videos in `video_paths`. It is recommended that you
cache the results after instantiation of the class.
Recreating the clips for different clip lengths is fast, and can be done
with the `compute_clips` method. A usage sketch is shown after the class definition.
Args:
video_paths (List[str]): paths to the video files
clip_length_in_frames (int): size of a clip in number of frames
frames_between_clips (int): step (in frames) between each clip
frame_rate (float, optional): if specified, it will resample the video
so that it has `frame_rate`, and then the clips will be defined
on the resampled video
num_workers (int): how many subprocesses to use for data loading.
0 means that the data will be loaded in the main process. (default: 0)
output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
"""
def __init__(
self,
video_paths: List[str],
clip_length_in_frames: int = 16,
frames_between_clips: int = 1,
frame_rate: Optional[float] = None,
_precomputed_metadata: Optional[Dict[str, Any]] = None,
num_workers: int = 0,
_video_width: int = 0,
_video_height: int = 0,
_video_min_dimension: int = 0,
_video_max_dimension: int = 0,
_audio_samples: int = 0,
_audio_channels: int = 0,
output_format: str = "THWC",
) -> None:
self.video_paths = video_paths
self.num_workers = num_workers
# these options are not valid for pyav backend
self._video_width = _video_width
self._video_height = _video_height
self._video_min_dimension = _video_min_dimension
self._video_max_dimension = _video_max_dimension
self._audio_samples = _audio_samples
self._audio_channels = _audio_channels
self.output_format = output_format.upper()
if self.output_format not in ("THWC", "TCHW"):
raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")
if _precomputed_metadata is None:
self._compute_frame_pts()
else:
self._init_from_metadata(_precomputed_metadata)
self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate)
def _compute_frame_pts(self) -> None:
self.video_pts = [] # len = num_videos. Each entry is a tensor of shape (num_frames_in_video,)
self.video_fps: List[float] = [] # len = num_videos
# strategy: use a DataLoader to parallelize read_video_timestamps
# so we need to create a dummy dataset first
import torch.utils.data
dl: torch.utils.data.DataLoader = torch.utils.data.DataLoader(
_VideoTimestampsDataset(self.video_paths), # type: ignore[arg-type]
batch_size=16,
num_workers=self.num_workers,
collate_fn=_collate_fn,
)
with tqdm(total=len(dl)) as pbar:
for batch in dl:
pbar.update(1)
batch_pts, batch_fps = list(zip(*batch))
# we need to specify dtype=torch.long because for empty list,
# torch.as_tensor will use torch.float as default dtype. This
# happens when decoding fails and no pts is returned in the list.
batch_pts = [torch.as_tensor(pts, dtype=torch.long) for pts in batch_pts]
self.video_pts.extend(batch_pts)
self.video_fps.extend(batch_fps)
def _init_from_metadata(self, metadata: Dict[str, Any]) -> None:
self.video_paths = metadata["video_paths"]
assert len(self.video_paths) == len(metadata["video_pts"])
self.video_pts = metadata["video_pts"]
assert len(self.video_paths) == len(metadata["video_fps"])
self.video_fps = metadata["video_fps"]
@property
def metadata(self) -> Dict[str, Any]:
_metadata = {
"video_paths": self.video_paths,
"video_pts": self.video_pts,
"video_fps": self.video_fps,
}
return _metadata
def subset(self, indices: List[int]) -> "VideoClips":
video_paths = [self.video_paths[i] for i in indices]
video_pts = [self.video_pts[i] for i in indices]
video_fps = [self.video_fps[i] for i in indices]
metadata = {
"video_paths": video_paths,
"video_pts": video_pts,
"video_fps": video_fps,
}
return type(self)(
video_paths,
clip_length_in_frames=self.num_frames,
frames_between_clips=self.step,
frame_rate=self.frame_rate,
_precomputed_metadata=metadata,
num_workers=self.num_workers,
_video_width=self._video_width,
_video_height=self._video_height,
_video_min_dimension=self._video_min_dimension,
_video_max_dimension=self._video_max_dimension,
_audio_samples=self._audio_samples,
_audio_channels=self._audio_channels,
output_format=self.output_format,
)
@staticmethod
def compute_clips_for_video(
video_pts: torch.Tensor, num_frames: int, step: int, fps: Optional[float], frame_rate: Optional[float] = None
) -> Tuple[torch.Tensor, Union[List[slice], torch.Tensor]]:
if fps is None:
# if for some reason the video doesn't have fps (because it doesn't have a video stream)
# set the fps to 1. The value doesn't matter, because video_pts is empty anyway
fps = 1
if frame_rate is None:
frame_rate = fps
total_frames = len(video_pts) * frame_rate / fps
_idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
video_pts = video_pts[_idxs]
clips = unfold(video_pts, num_frames, step)
if not clips.numel():
warnings.warn(
"There aren't enough frames in the current video to get a clip for the given clip length and "
"frames between clips. The video (and potentially others) will be skipped."
)
idxs: Union[List[slice], torch.Tensor]
if isinstance(_idxs, slice):
idxs = [_idxs] * len(clips)
else:
idxs = unfold(_idxs, num_frames, step)
return clips, idxs
def compute_clips(self, num_frames: int, step: int, frame_rate: Optional[float] = None) -> None:
"""
Compute all consecutive sequences of clips from video_pts.
Always returns clips of size `num_frames`, meaning that the
last few frames in a video can potentially be dropped.
Args:
num_frames (int): number of frames for the clip
step (int): distance between two clips
frame_rate (int, optional): The frame rate
"""
self.num_frames = num_frames
self.step = step
self.frame_rate = frame_rate
self.clips = []
self.resampling_idxs = []
for video_pts, fps in zip(self.video_pts, self.video_fps):
clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
self.clips.append(clips)
self.resampling_idxs.append(idxs)
clip_lengths = torch.as_tensor([len(v) for v in self.clips])
self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
def __len__(self) -> int:
return self.num_clips()
def num_videos(self) -> int:
return len(self.video_paths)
def num_clips(self) -> int:
"""
Number of subclips that are available in the video list.
"""
return self.cumulative_sizes[-1]
def get_clip_location(self, idx: int) -> Tuple[int, int]:
"""
Converts a flattened representation of the indices into a video_idx, clip_idx
representation.
"""
video_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if video_idx == 0:
clip_idx = idx
else:
clip_idx = idx - self.cumulative_sizes[video_idx - 1]
return video_idx, clip_idx
@staticmethod
def _resample_video_idx(num_frames: int, original_fps: float, new_fps: float) -> Union[slice, torch.Tensor]:
step = original_fps / new_fps
if step.is_integer():
# optimization: if step is integer, don't need to perform
# advanced indexing
step = int(step)
return slice(None, None, step)
idxs = torch.arange(num_frames, dtype=torch.float32) * step
idxs = idxs.floor().to(torch.int64)
return idxs
def get_clip(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any], int]:
"""
Gets a subclip from a list of videos.
Args:
idx (int): index of the subclip. Must be between 0 and num_clips().
Returns:
video (Tensor)
audio (Tensor)
info (Dict)
video_idx (int): index of the video in `video_paths`
"""
if idx >= self.num_clips():
raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)")
video_idx, clip_idx = self.get_clip_location(idx)
video_path = self.video_paths[video_idx]
clip_pts = self.clips[video_idx][clip_idx]
from torchvision import get_video_backend
backend = get_video_backend()
if backend == "pyav":
# check for invalid options
if self._video_width != 0:
raise ValueError("pyav backend doesn't support _video_width != 0")
if self._video_height != 0:
raise ValueError("pyav backend doesn't support _video_height != 0")
if self._video_min_dimension != 0:
raise ValueError("pyav backend doesn't support _video_min_dimension != 0")
if self._video_max_dimension != 0:
raise ValueError("pyav backend doesn't support _video_max_dimension != 0")
if self._audio_samples != 0:
raise ValueError("pyav backend doesn't support _audio_samples != 0")
if backend == "pyav":
start_pts = clip_pts[0].item()
end_pts = clip_pts[-1].item()
video, audio, info = read_video(video_path, start_pts, end_pts)
else:
_info = _probe_video_from_file(video_path)
video_fps = _info.video_fps
audio_fps = None
video_start_pts = cast(int, clip_pts[0].item())
video_end_pts = cast(int, clip_pts[-1].item())
audio_start_pts, audio_end_pts = 0, -1
audio_timebase = Fraction(0, 1)
video_timebase = Fraction(_info.video_timebase.numerator, _info.video_timebase.denominator)
if _info.has_audio:
audio_timebase = Fraction(_info.audio_timebase.numerator, _info.audio_timebase.denominator)
audio_start_pts = pts_convert(video_start_pts, video_timebase, audio_timebase, math.floor)
audio_end_pts = pts_convert(video_end_pts, video_timebase, audio_timebase, math.ceil)
audio_fps = _info.audio_sample_rate
video, audio, _ = _read_video_from_file(
video_path,
video_width=self._video_width,
video_height=self._video_height,
video_min_dimension=self._video_min_dimension,
video_max_dimension=self._video_max_dimension,
video_pts_range=(video_start_pts, video_end_pts),
video_timebase=video_timebase,
audio_samples=self._audio_samples,
audio_channels=self._audio_channels,
audio_pts_range=(audio_start_pts, audio_end_pts),
audio_timebase=audio_timebase,
)
info = {"video_fps": video_fps}
if audio_fps is not None:
info["audio_fps"] = audio_fps
if self.frame_rate is not None:
resampling_idx = self.resampling_idxs[video_idx][clip_idx]
if isinstance(resampling_idx, torch.Tensor):
resampling_idx = resampling_idx - resampling_idx[0]
video = video[resampling_idx]
info["video_fps"] = self.frame_rate
assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}"
if self.output_format == "TCHW":
# [T,H,W,C] --> [T,C,H,W]
video = video.permute(0, 3, 1, 2)
return video, audio, info, video_idx
def __getstate__(self) -> Dict[str, Any]:
video_pts_sizes = [len(v) for v in self.video_pts]
# To be back-compatible, we convert data to dtype torch.long as needed
# because for empty list, in legacy implementation, torch.as_tensor will
# use torch.float as default dtype. This happens when decoding fails and
# no pts is returned in the list.
video_pts = [x.to(torch.int64) for x in self.video_pts]
# video_pts can be an empty list if no frames have been decoded
if video_pts:
video_pts = torch.cat(video_pts) # type: ignore[assignment]
# avoid bug in https://github.com/pytorch/pytorch/issues/32351
# TODO: Revert it once the bug is fixed.
video_pts = video_pts.numpy() # type: ignore[attr-defined]
# make a copy of the fields of self
d = self.__dict__.copy()
d["video_pts_sizes"] = video_pts_sizes
d["video_pts"] = video_pts
# delete the following attributes to reduce the size of dictionary. They
# will be re-computed in "__setstate__()"
del d["clips"]
del d["resampling_idxs"]
del d["cumulative_sizes"]
# for backwards-compatibility
d["_version"] = 2
return d
def __setstate__(self, d: Dict[str, Any]) -> None:
# for backwards-compatibility
if "_version" not in d:
self.__dict__ = d
return
video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64)
video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0)
# don't need this info anymore
del d["video_pts_sizes"]
d["video_pts"] = video_pts
self.__dict__ = d
# recompute attributes "clips", "resampling_idxs" and other derivative ones
self.compute_clips(self.num_frames, self.step, self.frame_rate)
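# Illustrative usage sketch (not part of torchvision; the video paths are hypothetical
# placeholders): building VideoClips once, caching its metadata, and recomputing clips of
# a different length without re-decoding the videos.
def _example_video_clips_usage() -> None:
    paths = ["videos/a.mp4", "videos/b.mp4"]
    clips = VideoClips(paths, clip_length_in_frames=16, frames_between_clips=8)
    metadata = clips.metadata  # e.g. torch.save(metadata, "clips_metadata.pt") to reuse later
    clips = VideoClips(
        paths,
        clip_length_in_frames=32,
        frames_between_clips=16,
        _precomputed_metadata=metadata,  # skips the expensive decoding pass
    )
    video, audio, info, video_idx = clips.get_clip(0)
    print(video.shape, info["video_fps"], video_idx)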
```
|
======================================================================================================================
SOURCE CODE FILE: vision.py
LINES: 3
SIZE: 4.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\vision.py
ENCODING: utf-8
```py
import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base class for making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` methods; a minimal subclass sketch is shown at the end of this file.
Args:
root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: Union[str, Path] = None, # type: ignore[assignment]
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
```
|
===================================================================================================================
SOURCE CODE FILE: voc.py
LINES: 2
SIZE: 8.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\voc.py
ENCODING: utf-8
```py
import collections
import os
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from xml.etree.ElementTree import Element as ET_Element
try:
from defusedxml.ElementTree import parse as ET_parse
except ImportError:
from xml.etree.ElementTree import parse as ET_parse
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
DATASET_YEAR_DICT = {
"2012": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar",
"filename": "VOCtrainval_11-May-2012.tar",
"md5": "6cd6e144f989b92b3379bac3b3de84fd",
"base_dir": os.path.join("VOCdevkit", "VOC2012"),
},
"2011": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar",
"filename": "VOCtrainval_25-May-2011.tar",
"md5": "6c3384ef61512963050cb5d687e5bf1e",
"base_dir": os.path.join("TrainVal", "VOCdevkit", "VOC2011"),
},
"2010": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar",
"filename": "VOCtrainval_03-May-2010.tar",
"md5": "da459979d0c395079b5c75ee67908abb",
"base_dir": os.path.join("VOCdevkit", "VOC2010"),
},
"2009": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar",
"filename": "VOCtrainval_11-May-2009.tar",
"md5": "a3e00b113cfcfebf17e343f59da3caa1",
"base_dir": os.path.join("VOCdevkit", "VOC2009"),
},
"2008": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar",
"filename": "VOCtrainval_11-May-2012.tar",
"md5": "2629fa636546599198acfcfbfcf1904a",
"base_dir": os.path.join("VOCdevkit", "VOC2008"),
},
"2007": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar",
"filename": "VOCtrainval_06-Nov-2007.tar",
"md5": "c52e279531787c972589f7e41ab4ae64",
"base_dir": os.path.join("VOCdevkit", "VOC2007"),
},
"2007-test": {
"url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar",
"filename": "VOCtest_06-Nov-2007.tar",
"md5": "b6e924de25625d8de591ea690078ad9f",
"base_dir": os.path.join("VOCdevkit", "VOC2007"),
},
}
class _VOCBase(VisionDataset):
_SPLITS_DIR: str
_TARGET_DIR: str
_TARGET_FILE_EXT: str
def __init__(
self,
root: Union[str, Path],
year: str = "2012",
image_set: str = "train",
download: bool = False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
):
super().__init__(root, transforms, transform, target_transform)
self.year = verify_str_arg(year, "year", valid_values=[str(yr) for yr in range(2007, 2013)])
valid_image_sets = ["train", "trainval", "val"]
if year == "2007":
valid_image_sets.append("test")
self.image_set = verify_str_arg(image_set, "image_set", valid_image_sets)
key = "2007-test" if year == "2007" and image_set == "test" else year
dataset_year_dict = DATASET_YEAR_DICT[key]
self.url = dataset_year_dict["url"]
self.filename = dataset_year_dict["filename"]
self.md5 = dataset_year_dict["md5"]
base_dir = dataset_year_dict["base_dir"]
voc_root = os.path.join(self.root, base_dir)
if download:
download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5)
if not os.path.isdir(voc_root):
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
splits_dir = os.path.join(voc_root, "ImageSets", self._SPLITS_DIR)
split_f = os.path.join(splits_dir, image_set.rstrip("\n") + ".txt")
with open(os.path.join(split_f)) as f:
file_names = [x.strip() for x in f.readlines()]
image_dir = os.path.join(voc_root, "JPEGImages")
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
target_dir = os.path.join(voc_root, self._TARGET_DIR)
self.targets = [os.path.join(target_dir, x + self._TARGET_FILE_EXT) for x in file_names]
assert len(self.images) == len(self.targets)
def __len__(self) -> int:
return len(self.images)
class VOCSegmentation(_VOCBase):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the VOC Dataset.
year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
``year=="2007"``, can also be ``"test"``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
_SPLITS_DIR = "Segmentation"
_TARGET_DIR = "SegmentationClass"
_TARGET_FILE_EXT = ".png"
@property
def masks(self) -> List[str]:
return self.targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
img = Image.open(self.images[index]).convert("RGB")
target = Image.open(self.masks[index])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
class VOCDetection(_VOCBase):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the VOC Dataset.
year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
``year=="2007"``, can also be ``"test"``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
_SPLITS_DIR = "Main"
_TARGET_DIR = "Annotations"
_TARGET_FILE_EXT = ".xml"
@property
def annotations(self) -> List[str]:
return self.targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a dictionary of the XML tree.
"""
img = Image.open(self.images[index]).convert("RGB")
target = self.parse_voc_xml(ET_parse(self.annotations[index]).getroot())
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
@staticmethod
def parse_voc_xml(node: ET_Element) -> Dict[str, Any]:
voc_dict: Dict[str, Any] = {}
children = list(node)
if children:
def_dic: Dict[str, Any] = collections.defaultdict(list)
for dc in map(VOCDetection.parse_voc_xml, children):
for ind, v in dc.items():
def_dic[ind].append(v)
if node.tag == "annotation":
def_dic["object"] = [def_dic["object"]]
voc_dict = {node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}}
if node.text:
text = node.text.strip()
if not children:
voc_dict[node.tag] = text
return voc_dict
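# Illustrative sketch (not part of torchvision): parse_voc_xml turns the annotation XML
# into nested dicts and always wraps <object> entries of an <annotation> node in a list,
# even when there is a single object.
def _example_parse_voc_xml_usage() -> None:
    from xml.etree.ElementTree import fromstring
    xml = "<annotation><filename>img.jpg</filename><object><name>dog</name></object></annotation>"
    target = VOCDetection.parse_voc_xml(fromstring(xml))
    assert target["annotation"]["filename"] == "img.jpg"
    assert target["annotation"]["object"] == [{"name": "dog"}]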
```
|
=========================================================================================================================
SOURCE CODE FILE: widerface.py
LINES: 2
SIZE: 8.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\datasets\widerface.py
ENCODING: utf-8
```py
import os
from os.path import abspath, expanduser
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from PIL import Image
from .utils import download_and_extract_archive, download_file_from_google_drive, extract_archive, verify_str_arg
from .vision import VisionDataset
class WIDERFace(VisionDataset):
"""`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory where images and annotations are downloaded to.
Expects the following folder structure if download=False:
.. code::
<root>
└── widerface
├── wider_face_split ('wider_face_split.zip' if compressed)
├── WIDER_train ('WIDER_train.zip' if compressed)
├── WIDER_val ('WIDER_val.zip' if compressed)
└── WIDER_test ('WIDER_test.zip' if compressed)
split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.
Defaults to ``train``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
.. warning::
To download the dataset `gdown <https://github.com/wkentaro/gdown>`_ is required.
"""
BASE_FOLDER = "widerface"
FILE_LIST = [
# File ID MD5 Hash Filename
("15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"),
("1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"),
("1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"),
]
ANNOTATIONS_FILE = (
"http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip",
"0e3767bcf0e326556d407bf5bff5d27c",
"wider_face_split.zip",
)
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(
root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform
)
# check arguments
self.split = verify_str_arg(split, "split", ("train", "val", "test"))
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download and prepare it")
self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = []
if self.split in ("train", "val"):
self.parse_train_val_annotations_file()
else:
self.parse_test_annotations_file()
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a dict of annotations for all faces in the image.
target=None for the test split.
"""
# stay consistent with other datasets and return a PIL Image
img = Image.open(self.img_info[index]["img_path"]) # type: ignore[arg-type]
if self.transform is not None:
img = self.transform(img)
target = None if self.split == "test" else self.img_info[index]["annotations"]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.img_info)
def extra_repr(self) -> str:
lines = ["Split: {split}"]
return "\n".join(lines).format(**self.__dict__)
def parse_train_val_annotations_file(self) -> None:
filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt"
filepath = os.path.join(self.root, "wider_face_split", filename)
with open(filepath) as f:
lines = f.readlines()
file_name_line, num_boxes_line, box_annotation_line = True, False, False
num_boxes, box_counter = 0, 0
labels = []
for line in lines:
line = line.rstrip()
if file_name_line:
img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line)
img_path = abspath(expanduser(img_path))
file_name_line = False
num_boxes_line = True
elif num_boxes_line:
num_boxes = int(line)
num_boxes_line = False
box_annotation_line = True
elif box_annotation_line:
box_counter += 1
line_split = line.split(" ")
line_values = [int(x) for x in line_split]
labels.append(line_values)
if box_counter >= num_boxes:
box_annotation_line = False
file_name_line = True
labels_tensor = torch.tensor(labels)
self.img_info.append(
{
"img_path": img_path,
"annotations": {
"bbox": labels_tensor[:, 0:4].clone(), # x, y, width, height
"blur": labels_tensor[:, 4].clone(),
"expression": labels_tensor[:, 5].clone(),
"illumination": labels_tensor[:, 6].clone(),
"occlusion": labels_tensor[:, 7].clone(),
"pose": labels_tensor[:, 8].clone(),
"invalid": labels_tensor[:, 9].clone(),
},
}
)
box_counter = 0
labels.clear()
else:
raise RuntimeError(f"Error parsing annotation file {filepath}")
def parse_test_annotations_file(self) -> None:
filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt")
filepath = abspath(expanduser(filepath))
with open(filepath) as f:
lines = f.readlines()
for line in lines:
line = line.rstrip()
img_path = os.path.join(self.root, "WIDER_test", "images", line)
img_path = abspath(expanduser(img_path))
self.img_info.append({"img_path": img_path})
def _check_integrity(self) -> bool:
# Allow original archive to be deleted (zip). Only need the extracted images
all_files = self.FILE_LIST.copy()
all_files.append(self.ANNOTATIONS_FILE)
for (_, md5, filename) in all_files:
file, ext = os.path.splitext(filename)
extracted_dir = os.path.join(self.root, file)
if not os.path.exists(extracted_dir):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
# download and extract image data
for (file_id, md5, filename) in self.FILE_LIST:
download_file_from_google_drive(file_id, self.root, filename, md5)
filepath = os.path.join(self.root, filename)
extract_archive(filepath)
# download and extract annotation files
download_and_extract_archive(
url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1]
)
```
|
================================================================================================================
SOURCE CODE FILE: extension.py
LINES: 1
SIZE: 3.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\extension.py
ENCODING: utf-8
```py
import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
# On Windows Python-3.8.x has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find cuda related dlls we need to make sure the
# conda environment/bin path is configured Please take a look:
# https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
# Please note: if some path can't be added using add_dll_directory we simply ignore this path
if os.name == "nt" and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
try:
os.add_dll_directory(path) # type: ignore[attr-defined]
except Exception:
pass
lib_path = _get_extension_path("_C")
torch.ops.load_library(lib_path)
_HAS_OPS = True
def _has_ops(): # noqa: F811
return True
except (ImportError, OSError):
pass
def _assert_has_ops():
if not _has_ops():
raise RuntimeError(
"Couldn't load custom C++ ops. This can happen if your PyTorch and "
"torchvision versions are incompatible, or if you had errors while compiling "
"torchvision from source. For further information on the compatible versions, check "
"https://github.com/pytorch/vision#installation for the compatibility matrix. "
"Please check your PyTorch version with torch.__version__ and your torchvision "
"version with torchvision.__version__ and verify if they are compatible, and if not "
"please reinstall torchvision so that it matches your PyTorch install."
)
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
from torch.version import cuda as torch_version_cuda
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch_version_cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch_version_cuda.split(".")
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major:
raise RuntimeError(
"Detected that PyTorch and torchvision were compiled with different CUDA major versions. "
f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
f"CUDA Version={tv_major}.{tv_minor}. "
"Please reinstall the torchvision that matches your PyTorch install."
)
return _version
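# Interpretation note (an assumption based on the digit slicing above, not stated in the
# source): torch.ops.torchvision._cuda_version() appears to encode CUDA as
# major * 1000 + minor * 10, so 11080 is parsed as CUDA 11.8 and 9020 as CUDA 9.2.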
def _load_library(lib_name):
lib_path = _get_extension_path(lib_name)
torch.ops.load_library(lib_path)
_check_cuda_version()
```
|
==================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\__init__.py
ENCODING: utf-8
```py
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_avif,
decode_gif,
decode_heic,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"decode_avif",
"decode_heic",
"decode_webp",
"decode_gif",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
```
|
===========================================================================================================================
SOURCE CODE FILE: _load_gpu_decoder.py
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\_load_gpu_decoder.py
ENCODING: utf-8
```py
from ..extension import _load_library
try:
_load_library("gpu_decoder")
_HAS_GPU_VIDEO_DECODER = True
except (ImportError, OSError):
_HAS_GPU_VIDEO_DECODER = False
```
|
====================================================================================================================================
SOURCE CODE FILE: _video_deprecation_warning.py
LINES: 1
SIZE: 0.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\_video_deprecation_warning.py
ENCODING: utf-8
```py
import warnings
def _raise_video_deprecation_warning():
warnings.warn(
"The video decoding and encoding capabilities of torchvision "
"are deprecated from version 0.22 and will be removed in version 0.24. "
"We recommend that you migrate to TorchCodec, where we'll consolidate "
"the future decoding/encoding capabilities of PyTorch: "
"https://github.com/pytorch/torchcodec",
UserWarning,
)
```
|
====================================================================================================================
SOURCE CODE FILE: _video_opt.py
LINES: 1
SIZE: 20.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\_video_opt.py
ENCODING: utf-8
```py
import math
import warnings
from fractions import Fraction
from typing import Dict, List, Optional, Tuple, Union
import torch
from ..extension import _load_library
from ._video_deprecation_warning import _raise_video_deprecation_warning
try:
_load_library("video_reader")
_HAS_CPU_VIDEO_DECODER = True
except (ImportError, OSError):
_HAS_CPU_VIDEO_DECODER = False
_HAS_VIDEO_OPT = _HAS_CPU_VIDEO_DECODER # For BC
default_timebase = Fraction(0, 1)
# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
class Timebase:
__annotations__ = {"numerator": int, "denominator": int}
__slots__ = ["numerator", "denominator"]
def __init__(
self,
numerator: int,
denominator: int,
) -> None:
self.numerator = numerator
self.denominator = denominator
class VideoMetaData:
__annotations__ = {
"has_video": bool,
"video_timebase": Timebase,
"video_duration": float,
"video_fps": float,
"has_audio": bool,
"audio_timebase": Timebase,
"audio_duration": float,
"audio_sample_rate": float,
}
__slots__ = [
"has_video",
"video_timebase",
"video_duration",
"video_fps",
"has_audio",
"audio_timebase",
"audio_duration",
"audio_sample_rate",
]
def __init__(self) -> None:
self.has_video = False
self.video_timebase = Timebase(0, 1)
self.video_duration = 0.0
self.video_fps = 0.0
self.has_audio = False
self.audio_timebase = Timebase(0, 1)
self.audio_duration = 0.0
self.audio_sample_rate = 0.0
def _validate_pts(pts_range: Tuple[int, int]) -> None:
if pts_range[0] > pts_range[1] > 0:
raise ValueError(
f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]} and end pts: {pts_range[1]}"
)
def _fill_info(
vtimebase: torch.Tensor,
vfps: torch.Tensor,
vduration: torch.Tensor,
atimebase: torch.Tensor,
asample_rate: torch.Tensor,
aduration: torch.Tensor,
) -> VideoMetaData:
"""
Build and return a VideoMetaData struct with info about the video
"""
meta = VideoMetaData()
if vtimebase.numel() > 0:
meta.video_timebase = Timebase(int(vtimebase[0].item()), int(vtimebase[1].item()))
timebase = vtimebase[0].item() / float(vtimebase[1].item())
if vduration.numel() > 0:
meta.has_video = True
meta.video_duration = float(vduration.item()) * timebase
if vfps.numel() > 0:
meta.video_fps = float(vfps.item())
if atimebase.numel() > 0:
meta.audio_timebase = Timebase(int(atimebase[0].item()), int(atimebase[1].item()))
timebase = atimebase[0].item() / float(atimebase[1].item())
if aduration.numel() > 0:
meta.has_audio = True
meta.audio_duration = float(aduration.item()) * timebase
if asample_rate.numel() > 0:
meta.audio_sample_rate = float(asample_rate.item())
return meta
def _align_audio_frames(
aframes: torch.Tensor, aframe_pts: torch.Tensor, audio_pts_range: Tuple[int, int]
) -> torch.Tensor:
start, end = aframe_pts[0], aframe_pts[-1]
num_samples = aframes.size(0)
step_per_aframe = float(end - start + 1) / float(num_samples)
s_idx = 0
e_idx = num_samples
if start < audio_pts_range[0]:
s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
if audio_pts_range[1] != -1 and end > audio_pts_range[1]:
e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
return aframes[s_idx:e_idx, :]
def _read_video_from_file(
filename: str,
seek_frame_margin: float = 0.25,
read_video_stream: bool = True,
video_width: int = 0,
video_height: int = 0,
video_min_dimension: int = 0,
video_max_dimension: int = 0,
video_pts_range: Tuple[int, int] = (0, -1),
video_timebase: Fraction = default_timebase,
read_audio_stream: bool = True,
audio_samples: int = 0,
audio_channels: int = 0,
audio_pts_range: Tuple[int, int] = (0, -1),
audio_timebase: Fraction = default_timebase,
) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]:
"""
Reads a video from a file, returning both the video frames and the audio frames
Args:
filename (str): path to the video file
seek_frame_margin (double, optional): seeking frame in the stream is imprecise. Thus,
when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
read_video_stream (int, optional): whether to read the video stream. If yes, set to 1. Otherwise, 0
video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
the size of decoded frames:
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the original frame resolution
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension = 0, keep the aspect ratio and resize the
frame so that shorter edge size is video_min_dimension
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension != 0, keep the aspect ratio and resize
the frame so that longer edge size is video_max_dimension
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension != 0, resize the frame so that shorter
edge size is video_min_dimension, and longer edge size is
video_max_dimension. The aspect ratio may not be preserved
- When video_width = 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_height is $video_height
- When video_width != 0, video_height == 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_width is $video_width
- When video_width != 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, resize the frame so that frame
video_width and video_height are set to $video_width and
$video_height, respectively
video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
video_timebase (Fraction, optional): a Fraction rational number which denotes timebase in video stream
read_audio_stream (int, optional): whether to read the audio stream. If yes, set to 1. Otherwise, 0
audio_samples (int, optional): audio sampling rate
audio_channels (int, optional): number of audio channels
audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
audio_timebase (Fraction, optional): a Fraction rational number which denotes time base in audio stream
Returns:
vframes (Tensor[T, H, W, C]): the `T` video frames
aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
`K` is the number of audio_channels
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float)
and audio_fps (int)
"""
_raise_video_deprecation_warning()
_validate_pts(video_pts_range)
_validate_pts(audio_pts_range)
result = torch.ops.video_reader.read_video_from_file(
filename,
seek_frame_margin,
0, # getPtsOnly
read_video_stream,
video_width,
video_height,
video_min_dimension,
video_max_dimension,
video_pts_range[0],
video_pts_range[1],
video_timebase.numerator,
video_timebase.denominator,
read_audio_stream,
audio_samples,
audio_channels,
audio_pts_range[0],
audio_pts_range[1],
audio_timebase.numerator,
audio_timebase.denominator,
)
vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
if aframes.numel() > 0:
# when audio stream is found
aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
return vframes, aframes, info
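# Illustrative sketch (not part of torchvision; "clip.mp4" is a hypothetical placeholder):
# decoding a file with the frames resized so that their shorter edge is 256 pixels,
# together with its audio stream.
def _example_read_video_from_file_usage() -> None:
    vframes, aframes, info = _read_video_from_file(
        "clip.mp4",
        video_min_dimension=256,  # keep the aspect ratio, shorter edge resized to 256
        video_pts_range=(0, -1),  # decode the whole video stream
        read_audio_stream=True,
    )
    print(vframes.shape, aframes.shape, info.video_fps)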
def _read_video_timestamps_from_file(filename: str) -> Tuple[List[int], List[int], VideoMetaData]:
"""
Decode all video and audio frames in the video. Only pts
(presentation timestamp) is returned. The actual frame pixel data is not
copied. Thus, it is much faster than read_video(...)
"""
result = torch.ops.video_reader.read_video_from_file(
filename,
0, # seek_frame_margin
1, # getPtsOnly
1, # read_video_stream
0, # video_width
0, # video_height
0, # video_min_dimension
0, # video_max_dimension
0, # video_start_pts
-1, # video_end_pts
0, # video_timebase_num
1, # video_timebase_den
1, # read_audio_stream
0, # audio_samples
0, # audio_channels
0, # audio_start_pts
-1, # audio_end_pts
0, # audio_timebase_num
1, # audio_timebase_den
)
_vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
vframe_pts = vframe_pts.numpy().tolist()
aframe_pts = aframe_pts.numpy().tolist()
return vframe_pts, aframe_pts, info
def _probe_video_from_file(filename: str) -> VideoMetaData:
"""
Probe a video file and return VideoMetaData with info about the video
"""
_raise_video_deprecation_warning()
result = torch.ops.video_reader.probe_video_from_file(filename)
vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
return info
def _read_video_from_memory(
video_data: torch.Tensor,
seek_frame_margin: float = 0.25,
read_video_stream: int = 1,
video_width: int = 0,
video_height: int = 0,
video_min_dimension: int = 0,
video_max_dimension: int = 0,
video_pts_range: Tuple[int, int] = (0, -1),
video_timebase_numerator: int = 0,
video_timebase_denominator: int = 1,
read_audio_stream: int = 1,
audio_samples: int = 0,
audio_channels: int = 0,
audio_pts_range: Tuple[int, int] = (0, -1),
audio_timebase_numerator: int = 0,
audio_timebase_denominator: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Reads a video from memory, returning both the video frames and the audio frames
This function is torchscriptable.
Args:
video_data (data type could be 1) torch.Tensor, dtype=torch.int8 or 2) python bytes):
compressed video content stored in either 1) torch.Tensor 2) python bytes
seek_frame_margin (double, optional): seeking frame in the stream is imprecise.
Thus, when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
read_video_stream (int, optional): whether to read the video stream. If yes, set to 1. Otherwise, 0
video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
the size of decoded frames:
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the original frame resolution
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension = 0, keep the aspect ratio and resize the
frame so that shorter edge size is video_min_dimension
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension != 0, keep the aspect ratio and resize
the frame so that longer edge size is video_max_dimension
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension != 0, resize the frame so that shorter
edge size is video_min_dimension, and longer edge size is
video_max_dimension. The aspect ratio may not be preserved
- When video_width = 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_height is $video_height
- When video_width != 0, video_height == 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_width is $video_width
- When video_width != 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, resize the frame so that frame
video_width and video_height are set to $video_width and
$video_height, respectively
video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
video_timebase_numerator / video_timebase_denominator (float, optional): a rational
number which denotes timebase in video stream
read_audio_stream (int, optional): whether to read the audio stream. If yes, set to 1. Otherwise, 0
audio_samples (int, optional): audio sampling rate
audio_channels (int, optional): number of audio channels
audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
audio_timebase_numerator / audio_timebase_denominator (float, optional):
a rational number which denotes time base in audio stream
Returns:
vframes (Tensor[T, H, W, C]): the `T` video frames
aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
`K` is the number of channels
"""
_raise_video_deprecation_warning()
_validate_pts(video_pts_range)
_validate_pts(audio_pts_range)
if not isinstance(video_data, torch.Tensor):
with warnings.catch_warnings():
# Ignore the warning because we actually don't modify the buffer in this function
warnings.filterwarnings("ignore", message="The given buffer is not writable")
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.read_video_from_memory(
video_data,
seek_frame_margin,
0, # getPtsOnly
read_video_stream,
video_width,
video_height,
video_min_dimension,
video_max_dimension,
video_pts_range[0],
video_pts_range[1],
video_timebase_numerator,
video_timebase_denominator,
read_audio_stream,
audio_samples,
audio_channels,
audio_pts_range[0],
audio_pts_range[1],
audio_timebase_numerator,
audio_timebase_denominator,
)
vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result
if aframes.numel() > 0:
# when audio stream is found
aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
return vframes, aframes
def _read_video_timestamps_from_memory(
video_data: torch.Tensor,
) -> Tuple[List[int], List[int], VideoMetaData]:
"""
Decode all frames in the video. Only pts (presentation timestamp) is returned.
The actual frame pixel data is not copied. Thus, read_video_timestamps(...)
is much faster than read_video(...)
"""
if not isinstance(video_data, torch.Tensor):
with warnings.catch_warnings():
# Ignore the warning because we actually don't modify the buffer in this function
warnings.filterwarnings("ignore", message="The given buffer is not writable")
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.read_video_from_memory(
video_data,
0, # seek_frame_margin
1, # getPtsOnly
1, # read_video_stream
0, # video_width
0, # video_height
0, # video_min_dimension
0, # video_max_dimension
0, # video_start_pts
-1, # video_end_pts
0, # video_timebase_num
1, # video_timebase_den
1, # read_audio_stream
0, # audio_samples
0, # audio_channels
0, # audio_start_pts
-1, # audio_end_pts
0, # audio_timebase_num
1, # audio_timebase_den
)
_raise_video_deprecation_warning()
_vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
vframe_pts = vframe_pts.numpy().tolist()
aframe_pts = aframe_pts.numpy().tolist()
return vframe_pts, aframe_pts, info
def _probe_video_from_memory(
video_data: torch.Tensor,
) -> VideoMetaData:
"""
Probe a video in memory and return VideoMetaData with info about the video
This function is torchscriptable
"""
_raise_video_deprecation_warning()
if not isinstance(video_data, torch.Tensor):
with warnings.catch_warnings():
# Ignore the warning because we actually don't modify the buffer in this function
warnings.filterwarnings("ignore", message="The given buffer is not writable")
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.probe_video_from_memory(video_data)
vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
return info
def _read_video(
filename: str,
start_pts: Union[float, Fraction] = 0,
end_pts: Optional[Union[float, Fraction]] = None,
pts_unit: str = "pts",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, float]]:
_raise_video_deprecation_warning()
if end_pts is None:
end_pts = float("inf")
if pts_unit == "pts":
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
info = _probe_video_from_file(filename)
has_video = info.has_video
has_audio = info.has_audio
def get_pts(time_base):
start_offset = start_pts
end_offset = end_pts
if pts_unit == "sec":
start_offset = int(math.floor(start_pts * (1 / time_base)))
if end_offset != float("inf"):
end_offset = int(math.ceil(end_pts * (1 / time_base)))
if end_offset == float("inf"):
end_offset = -1
return start_offset, end_offset
video_pts_range = (0, -1)
video_timebase = default_timebase
if has_video:
video_timebase = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
video_pts_range = get_pts(video_timebase)
audio_pts_range = (0, -1)
audio_timebase = default_timebase
if has_audio:
audio_timebase = Fraction(info.audio_timebase.numerator, info.audio_timebase.denominator)
audio_pts_range = get_pts(audio_timebase)
vframes, aframes, info = _read_video_from_file(
filename,
read_video_stream=True,
video_pts_range=video_pts_range,
video_timebase=video_timebase,
read_audio_stream=True,
audio_pts_range=audio_pts_range,
audio_timebase=audio_timebase,
)
_info = {}
if has_video:
_info["video_fps"] = info.video_fps
if has_audio:
_info["audio_fps"] = info.audio_sample_rate
return vframes, aframes, _info
def _read_video_timestamps(
filename: str, pts_unit: str = "pts"
) -> Tuple[Union[List[int], List[Fraction]], Optional[float]]:
_raise_video_deprecation_warning()
if pts_unit == "pts":
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
pts: Union[List[int], List[Fraction]]
pts, _, info = _read_video_timestamps_from_file(filename)
if pts_unit == "sec":
video_time_base = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
pts = [x * video_time_base for x in pts]
video_fps = info.video_fps if info.has_video else None
return pts, video_fps
```
|
===============================================================================================================
SOURCE CODE FILE: image.py
LINES: 1
SIZE: 21.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\image.py
ENCODING: utf-8
```py
from enum import Enum
from typing import List, Union
from warnings import warn
import torch
from ..extension import _load_library
from ..utils import _log_api_usage_once
try:
_load_library("image")
except (ImportError, OSError) as e:
warn(
f"Failed to load image Python extension: '{e}'"
f"If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. "
f"Otherwise, there might be something wrong with your environment. "
f"Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?"
)
class ImageReadMode(Enum):
"""Allow automatic conversion to RGB, RGBA, etc while decoding.
.. note::
        You don't need to use this struct; you can just pass strings to all
``mode`` parameters, e.g. ``mode="RGB"``.
The different available modes are the following.
- UNCHANGED: loads the image as-is
- RGB: converts to RGB
- RGBA: converts to RGB with transparency (also aliased as RGB_ALPHA)
- GRAY: converts to grayscale
- GRAY_ALPHA: converts to grayscale with transparency
.. note::
Some decoders won't support all possible values, e.g. GRAY and
GRAY_ALPHA are only supported for PNG and JPEG images.
"""
UNCHANGED = 0
GRAY = 1
GRAY_ALPHA = 2
RGB = 3
RGB_ALPHA = 4
RGBA = RGB_ALPHA # Alias for convenience
def read_file(path: str) -> torch.Tensor:
"""
Return the bytes contents of a file as a uint8 1D Tensor.
Args:
path (str or ``pathlib.Path``): the path to the file to be read
Returns:
data (Tensor)
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_file)
data = torch.ops.image.read_file(str(path))
return data
def write_file(filename: str, data: torch.Tensor) -> None:
"""
Write the content of an uint8 1D tensor to a file.
Args:
filename (str or ``pathlib.Path``): the path to the file to be written
data (Tensor): the contents to be written to the output file
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_file)
torch.ops.image.write_file(str(filename), data)
def decode_png(
input: torch.Tensor,
mode: ImageReadMode = ImageReadMode.UNCHANGED,
apply_exif_orientation: bool = False,
) -> torch.Tensor:
"""
Decodes a PNG image into a 3 dimensional RGB or grayscale Tensor.
The values of the output tensor are in uint8 in [0, 255] for most cases. If
the image is a 16-bit png, then the output tensor is uint16 in [0, 65535]
(supported from torchvision ``0.21``). Since uint16 support is limited in
pytorch, we recommend calling
:func:`torchvision.transforms.v2.functional.to_dtype()` with ``scale=True``
after this function to convert the decoded image into a uint8 or float
tensor.
Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the PNG image.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
apply_exif_orientation (bool): apply EXIF orientation transformation to the output tensor.
Default: False.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_png)
if isinstance(mode, str):
mode = ImageReadMode[mode.upper()]
output = torch.ops.image.decode_png(input, mode.value, apply_exif_orientation)
return output
def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor:
"""
Takes an input tensor in CHW layout and returns a buffer with the contents
of its corresponding PNG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of
            ``c`` channels, where ``c`` must be 3 or 1.
compression_level (int): Compression factor for the resulting file, it must be a number
between 0 and 9. Default: 6
Returns:
Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the
PNG file.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_png)
output = torch.ops.image.encode_png(input, compression_level)
return output
def write_png(input: torch.Tensor, filename: str, compression_level: int = 6):
"""
Takes an input tensor in CHW layout (or HW in the case of grayscale images)
and saves it in a PNG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of
``c`` channels, where ``c`` must be 1 or 3.
filename (str or ``pathlib.Path``): Path to save the image.
compression_level (int): Compression factor for the resulting file, it must be a number
between 0 and 9. Default: 6
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_png)
output = encode_png(input, compression_level)
write_file(filename, output)
def decode_jpeg(
input: Union[torch.Tensor, List[torch.Tensor]],
mode: ImageReadMode = ImageReadMode.UNCHANGED,
device: Union[str, torch.device] = "cpu",
apply_exif_orientation: bool = False,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Decode JPEG image(s) into 3D RGB or grayscale Tensor(s), on CPU or CUDA.
The values of the output tensor are uint8 between 0 and 255.
.. note::
When using a CUDA device, passing a list of tensors is more efficient than repeated individual calls to ``decode_jpeg``.
When using CPU the performance is equivalent.
The CUDA version of this function has explicitly been designed with thread-safety in mind.
This function does not return partial results in case of an error.
Args:
input (Tensor[1] or list[Tensor[1]]): a (list of) one dimensional uint8 tensor(s) containing
the raw bytes of the JPEG image. The tensor(s) must be on CPU,
regardless of the ``device`` parameter.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
device (str or torch.device): The device on which the decoded image will
be stored. If a cuda device is specified, the image will be decoded
with `nvjpeg <https://developer.nvidia.com/nvjpeg>`_. This is only
supported for CUDA version >= 10.1
.. betastatus:: device parameter
.. warning::
There is a memory leak in the nvjpeg library for CUDA versions < 11.6.
Make sure to rely on CUDA 11.6 or above before using ``device="cuda"``.
apply_exif_orientation (bool): apply EXIF orientation transformation to the output tensor.
Default: False. Only implemented for JPEG format on CPU.
Returns:
output (Tensor[image_channels, image_height, image_width] or list[Tensor[image_channels, image_height, image_width]]):
The values of the output tensor(s) are uint8 between 0 and 255.
``output.device`` will be set to the specified ``device``
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_jpeg)
if isinstance(device, str):
device = torch.device(device)
if isinstance(mode, str):
mode = ImageReadMode[mode.upper()]
if isinstance(input, list):
if len(input) == 0:
raise ValueError("Input list must contain at least one element")
if not all(isinstance(t, torch.Tensor) for t in input):
raise ValueError("All elements of the input list must be tensors.")
if not all(t.device.type == "cpu" for t in input):
raise ValueError("Input list must contain tensors on CPU.")
if device.type == "cuda":
return torch.ops.image.decode_jpegs_cuda(input, mode.value, device)
else:
return [torch.ops.image.decode_jpeg(img, mode.value, apply_exif_orientation) for img in input]
else: # input is tensor
if input.device.type != "cpu":
raise ValueError("Input tensor must be a CPU tensor")
if device.type == "cuda":
return torch.ops.image.decode_jpegs_cuda([input], mode.value, device)[0]
else:
return torch.ops.image.decode_jpeg(input, mode.value, apply_exif_orientation)
def encode_jpeg(
input: Union[torch.Tensor, List[torch.Tensor]], quality: int = 75
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Encode RGB tensor(s) into raw encoded jpeg bytes, on CPU or CUDA.
.. note::
Passing a list of CUDA tensors is more efficient than repeated individual calls to ``encode_jpeg``.
For CPU tensors the performance is equivalent.
Args:
input (Tensor[channels, image_height, image_width] or List[Tensor[channels, image_height, image_width]]):
(list of) uint8 image tensor(s) of ``c`` channels, where ``c`` must be 1 or 3
quality (int): Quality of the resulting JPEG file(s). Must be a number between
1 and 100. Default: 75
Returns:
output (Tensor[1] or list[Tensor[1]]): A (list of) one dimensional uint8 tensor(s) that contain the raw bytes of the JPEG file.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_jpeg)
if quality < 1 or quality > 100:
raise ValueError("Image quality should be a positive number between 1 and 100")
if isinstance(input, list):
if not input:
raise ValueError("encode_jpeg requires at least one input tensor when a list is passed")
if input[0].device.type == "cuda":
return torch.ops.image.encode_jpegs_cuda(input, quality)
else:
return [torch.ops.image.encode_jpeg(image, quality) for image in input]
else: # single input tensor
if input.device.type == "cuda":
return torch.ops.image.encode_jpegs_cuda([input], quality)[0]
else:
return torch.ops.image.encode_jpeg(input, quality)
def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75):
"""
Takes an input tensor in CHW layout and saves it in a JPEG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of ``c``
channels, where ``c`` must be 1 or 3.
filename (str or ``pathlib.Path``): Path to save the image.
quality (int): Quality of the resulting JPEG file, it must be a number
between 1 and 100. Default: 75
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_jpeg)
output = encode_jpeg(input, quality)
assert isinstance(output, torch.Tensor) # Needed for torchscript
write_file(filename, output)
def decode_image(
input: Union[torch.Tensor, str],
mode: ImageReadMode = ImageReadMode.UNCHANGED,
apply_exif_orientation: bool = False,
) -> torch.Tensor:
"""Decode an image into a uint8 tensor, from a path or from raw encoded bytes.
Currently supported image formats are jpeg, png, gif and webp.
The values of the output tensor are in uint8 in [0, 255] for most cases.
If the image is a 16-bit png, then the output tensor is uint16 in [0, 65535]
(supported from torchvision ``0.21``). Since uint16 support is limited in
pytorch, we recommend calling
:func:`torchvision.transforms.v2.functional.to_dtype()` with ``scale=True``
after this function to convert the decoded image into a uint8 or float
tensor.
.. note::
``decode_image()`` doesn't work yet on AVIF or HEIC images. For these
formats, directly call :func:`~torchvision.io.decode_avif` or
:func:`~torchvision.io.decode_heic`.
Args:
input (Tensor or str or ``pathlib.Path``): The image to decode. If a
tensor is passed, it must be one dimensional uint8 tensor containing
the raw bytes of the image. Otherwise, this must be a path to the image file.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
apply_exif_orientation (bool): apply EXIF orientation transformation to the output tensor.
Only applies to JPEG and PNG images. Default: False.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_image)
if not isinstance(input, torch.Tensor):
input = read_file(str(input))
if isinstance(mode, str):
mode = ImageReadMode[mode.upper()]
output = torch.ops.image.decode_image(input, mode.value, apply_exif_orientation)
return output
def read_image(
path: str,
mode: ImageReadMode = ImageReadMode.UNCHANGED,
apply_exif_orientation: bool = False,
) -> torch.Tensor:
"""[OBSOLETE] Use :func:`~torchvision.io.decode_image` instead."""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_image)
data = read_file(path)
return decode_image(data, mode, apply_exif_orientation=apply_exif_orientation)
def decode_gif(input: torch.Tensor) -> torch.Tensor:
"""
Decode a GIF image into a 3 or 4 dimensional RGB Tensor.
The values of the output tensor are uint8 between 0 and 255.
The output tensor has shape ``(C, H, W)`` if there is only one image in the
GIF, and ``(N, C, H, W)`` if there are ``N`` images.
Args:
input (Tensor[1]): a one dimensional contiguous uint8 tensor containing
the raw bytes of the GIF image.
Returns:
output (Tensor[image_channels, image_height, image_width] or Tensor[num_images, image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_gif)
return torch.ops.image.decode_gif(input)
def decode_webp(
input: torch.Tensor,
mode: ImageReadMode = ImageReadMode.UNCHANGED,
) -> torch.Tensor:
"""
Decode a WEBP image into a 3 dimensional RGB[A] Tensor.
The values of the output tensor are uint8 between 0 and 255.
Args:
input (Tensor[1]): a one dimensional contiguous uint8 tensor containing
the raw bytes of the WEBP image.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
Returns:
Decoded image (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_webp)
if isinstance(mode, str):
mode = ImageReadMode[mode.upper()]
return torch.ops.image.decode_webp(input, mode.value)
# TODO_AVIF_HEIC: Better support for torchscript. Scripting decode_avif or
# decode_heic currently fails, mainly because of the logic
# _load_extra_decoders_once() (using global variables, try/except statements,
# etc.).
# The ops (torch.ops.extra_decoders_ns.decode_*) are otherwise torchscript-able,
# and users who need torchscript can always just wrap those.
# TODO_AVIF_HEIC: decode_image() should work for those. The key technical issue
# we have here is that the format detection logic of decode_image() is
# implemented in torchvision, and torchvision has zero knowledge of
# torchvision-extra-decoders, so we cannot call the AVIF/HEIC C++ decoders
# (those in torchvision-extra-decoders) from there.
# A trivial check that could be done within torchvision would be to check the
# file extension, if a path was passed. We could also just implement the
# AVIF/HEIC detection logic in Python as a fallback, if the file detection
# didn't find any format. In any case: properly determining whether a file is
# HEIC is far from trivial, and relying on libmagic would probably be best
_EXTRA_DECODERS_ALREADY_LOADED = False
def _load_extra_decoders_once():
global _EXTRA_DECODERS_ALREADY_LOADED
if _EXTRA_DECODERS_ALREADY_LOADED:
return
try:
import torchvision_extra_decoders
# torchvision-extra-decoders only supports linux for now. BUT, users on
# e.g. MacOS can still install it: they will get the pure-python
# 0.0.0.dev version:
# https://pypi.org/project/torchvision-extra-decoders/0.0.0.dev0, which
# is a dummy version that was created to reserve the namespace on PyPI.
# We have to check that expose_extra_decoders() exists for those users,
# so we can properly error on non-Linux archs.
assert hasattr(torchvision_extra_decoders, "expose_extra_decoders")
except (AssertionError, ImportError) as e:
raise RuntimeError(
"In order to enable the AVIF and HEIC decoding capabilities of "
"torchvision, you need to `pip install torchvision-extra-decoders`. "
"Just install the package, you don't need to update your code. "
"This is only supported on Linux, and this feature is still in BETA stage. "
"Please let us know of any issue: https://github.com/pytorch/vision/issues/new/choose. "
"Note that `torchvision-extra-decoders` is released under the LGPL license. "
) from e
# This will expose torch.ops.extra_decoders_ns.decode_avif and torch.ops.extra_decoders_ns.decode_heic
torchvision_extra_decoders.expose_extra_decoders()
_EXTRA_DECODERS_ALREADY_LOADED = True
def decode_avif(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
"""Decode an AVIF image into a 3 dimensional RGB[A] Tensor.
.. warning::
In order to enable the AVIF decoding capabilities of torchvision, you
first need to run ``pip install torchvision-extra-decoders``. Just
install the package, you don't need to update your code. This is only
supported on Linux, and this feature is still in BETA stage. Please let
us know of any issue:
https://github.com/pytorch/vision/issues/new/choose. Note that
`torchvision-extra-decoders
<https://github.com/pytorch-labs/torchvision-extra-decoders/>`_ is
released under the LGPL license.
The values of the output tensor are in uint8 in [0, 255] for most images. If
the image has a bit-depth of more than 8, then the output tensor is uint16
in [0, 65535]. Since uint16 support is limited in pytorch, we recommend
calling :func:`torchvision.transforms.v2.functional.to_dtype()` with
``scale=True`` after this function to convert the decoded image into a uint8
or float tensor.
Args:
input (Tensor[1]): a one dimensional contiguous uint8 tensor containing
the raw bytes of the AVIF image.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
Returns:
Decoded image (Tensor[image_channels, image_height, image_width])
"""
_load_extra_decoders_once()
if input.dtype != torch.uint8:
raise RuntimeError(f"Input tensor must have uint8 data type, got {input.dtype}")
return torch.ops.extra_decoders_ns.decode_avif(input, mode.value)
def decode_heic(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
"""Decode an HEIC image into a 3 dimensional RGB[A] Tensor.
.. warning::
        In order to enable the HEIC decoding capabilities of torchvision, you
first need to run ``pip install torchvision-extra-decoders``. Just
install the package, you don't need to update your code. This is only
supported on Linux, and this feature is still in BETA stage. Please let
us know of any issue:
https://github.com/pytorch/vision/issues/new/choose. Note that
`torchvision-extra-decoders
<https://github.com/pytorch-labs/torchvision-extra-decoders/>`_ is
released under the LGPL license.
The values of the output tensor are in uint8 in [0, 255] for most images. If
the image has a bit-depth of more than 8, then the output tensor is uint16
in [0, 65535]. Since uint16 support is limited in pytorch, we recommend
calling :func:`torchvision.transforms.v2.functional.to_dtype()` with
``scale=True`` after this function to convert the decoded image into a uint8
or float tensor.
Args:
input (Tensor[1]): a one dimensional contiguous uint8 tensor containing
the raw bytes of the HEIC image.
mode (str or ImageReadMode): The mode to convert the image to, e.g. "RGB".
Default is "UNCHANGED". See :class:`~torchvision.io.ImageReadMode`
for available modes.
Returns:
Decoded image (Tensor[image_channels, image_height, image_width])
"""
_load_extra_decoders_once()
if input.dtype != torch.uint8:
raise RuntimeError(f"Input tensor must have uint8 data type, got {input.dtype}")
return torch.ops.extra_decoders_ns.decode_heic(input, mode.value)
```
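A brief usage sketch for the decoding and encoding helpers defined above; the file names are placeholders and the snippet assumes a readable `example.jpg` on disk:

```py
import torch
from torchvision.io import decode_image, encode_jpeg, write_png

# Decode any supported container (jpeg/png/gif/webp) straight from a path,
# forcing three RGB channels; the result is a uint8 tensor of shape (3, H, W).
img = decode_image("example.jpg", mode="RGB")

# Re-encode the same tensor: once as raw JPEG bytes, once as a PNG on disk.
jpeg_bytes = encode_jpeg(img, quality=90)          # 1D uint8 tensor of raw bytes
write_png(img, "example_copy.png", compression_level=6)
```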
|
===============================================================================================================
SOURCE CODE FILE: video.py
LINES: 1
SIZE: 18.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\video.py
ENCODING: utf-8
```py
import gc
import math
import os
import re
import warnings
from fractions import Fraction
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from ..utils import _log_api_usage_once
from . import _video_opt
from ._video_deprecation_warning import _raise_video_deprecation_warning
try:
import av
av.logging.set_level(av.logging.ERROR)
if not hasattr(av.video.frame.VideoFrame, "pict_type"):
av = ImportError(
"""\
Your version of PyAV is too old for the necessary video operations in torchvision.
If you are on Python 3.5, you will have to build from source (the conda-forge
packages are not up-to-date). See
https://github.com/mikeboers/PyAV#installation for instructions on how to
install PyAV on your system.
"""
)
try:
FFmpegError = av.FFmpegError # from av 14 https://github.com/PyAV-Org/PyAV/blob/main/CHANGELOG.rst
except AttributeError:
FFmpegError = av.AVError
except ImportError:
av = ImportError(
"""\
PyAV is not installed, and is necessary for the video operations in torchvision.
See https://github.com/mikeboers/PyAV#installation for instructions on how to
install PyAV on your system.
"""
)
def _check_av_available() -> None:
if isinstance(av, Exception):
raise av
def _av_available() -> bool:
return not isinstance(av, Exception)
# PyAV has some reference cycles
_CALLED_TIMES = 0
_GC_COLLECTION_INTERVAL = 10
def write_video(
filename: str,
video_array: torch.Tensor,
fps: float,
video_codec: str = "libx264",
options: Optional[Dict[str, Any]] = None,
audio_array: Optional[torch.Tensor] = None,
audio_fps: Optional[float] = None,
audio_codec: Optional[str] = None,
audio_options: Optional[Dict[str, Any]] = None,
) -> None:
"""
[DEPRECATED] Writes a 4d tensor in [T, H, W, C] format in a video file.
.. warning::
DEPRECATED: All the video decoding and encoding capabilities of torchvision
are deprecated from version 0.22 and will be removed in version 0.24. We
recommend that you migrate to
`TorchCodec <https://github.com/pytorch/torchcodec>`__, where we'll
consolidate the future decoding/encoding capabilities of PyTorch
    This function relies on PyAV (and therefore, ultimately, FFmpeg) to encode
    videos; you can get more fine-grained control by referring to the other
options at your disposal within `the FFMpeg wiki
<http://trac.ffmpeg.org/wiki#Encoding>`_.
Args:
filename (str): path where the video will be saved
video_array (Tensor[T, H, W, C]): tensor containing the individual frames,
as a uint8 tensor in [T, H, W, C] format
fps (Number): video frames per second
video_codec (str): the name of the video codec, i.e. "libx264", "h264", etc.
options (Dict): dictionary containing options to be passed into the PyAV video stream.
The list of options is codec-dependent and can all
be found from `the FFMpeg wiki <http://trac.ffmpeg.org/wiki#Encoding>`_.
audio_array (Tensor[C, N]): tensor containing the audio, where C is the number of channels
and N is the number of samples
audio_fps (Number): audio sample rate, typically 44100 or 48000
audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc.
audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream.
The list of options is codec-dependent and can all
be found from `the FFMpeg wiki <http://trac.ffmpeg.org/wiki#Encoding>`_.
Examples::
>>> # Creating libx264 video with CRF 17, for visually lossless footage:
>>>
>>> from torchvision.io import write_video
>>> # 1000 frames of 100x100, 3-channel image.
        >>> vid = torch.randint(0, 256, (1000, 100, 100, 3), dtype=torch.uint8)
        >>> write_video("video.mp4", vid, fps=30, options={"crf": "17"})
"""
_raise_video_deprecation_warning()
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_video)
_check_av_available()
video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy(force=True)
    # PyAV does not support fps values with a fractional part
    # and will throw an OverflowException otherwise
if isinstance(fps, float):
fps = np.round(fps)
with av.open(filename, mode="w") as container:
stream = container.add_stream(video_codec, rate=fps)
stream.width = video_array.shape[2]
stream.height = video_array.shape[1]
stream.pix_fmt = "yuv420p" if video_codec != "libx264rgb" else "rgb24"
stream.options = options or {}
if audio_array is not None:
audio_format_dtypes = {
"dbl": "<f8",
"dblp": "<f8",
"flt": "<f4",
"fltp": "<f4",
"s16": "<i2",
"s16p": "<i2",
"s32": "<i4",
"s32p": "<i4",
"u8": "u1",
"u8p": "u1",
}
a_stream = container.add_stream(audio_codec, rate=audio_fps)
a_stream.options = audio_options or {}
num_channels = audio_array.shape[0]
audio_layout = "stereo" if num_channels > 1 else "mono"
audio_sample_fmt = container.streams.audio[0].format.name
format_dtype = np.dtype(audio_format_dtypes[audio_sample_fmt])
audio_array = torch.as_tensor(audio_array).numpy(force=True).astype(format_dtype)
frame = av.AudioFrame.from_ndarray(audio_array, format=audio_sample_fmt, layout=audio_layout)
frame.sample_rate = audio_fps
for packet in a_stream.encode(frame):
container.mux(packet)
for packet in a_stream.encode():
container.mux(packet)
for img in video_array:
frame = av.VideoFrame.from_ndarray(img, format="rgb24")
try:
frame.pict_type = "NONE"
except TypeError:
from av.video.frame import PictureType # noqa
frame.pict_type = PictureType.NONE
for packet in stream.encode(frame):
container.mux(packet)
# Flush stream
for packet in stream.encode():
container.mux(packet)
def _read_from_stream(
container: "av.container.Container",
start_offset: float,
end_offset: float,
pts_unit: str,
stream: "av.stream.Stream",
stream_name: Dict[str, Optional[Union[int, Tuple[int, ...], List[int]]]],
) -> List["av.frame.Frame"]:
global _CALLED_TIMES, _GC_COLLECTION_INTERVAL
_CALLED_TIMES += 1
if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:
gc.collect()
if pts_unit == "sec":
# TODO: we should change all of this from ground up to simply take
# sec and convert to MS in C++
start_offset = int(math.floor(start_offset * (1 / stream.time_base)))
if end_offset != float("inf"):
end_offset = int(math.ceil(end_offset * (1 / stream.time_base)))
else:
warnings.warn("The pts_unit 'pts' gives wrong results. Please use pts_unit 'sec'.")
frames = {}
should_buffer = True
max_buffer_size = 5
if stream.type == "video":
# DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)
# so need to buffer some extra frames to sort everything
# properly
extradata = stream.codec_context.extradata
# overly complicated way of finding if `divx_packed` is set, following
# https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263
if extradata and b"DivX" in extradata:
# can't use regex directly because of some weird characters sometimes...
pos = extradata.find(b"DivX")
d = extradata[pos:]
o = re.search(rb"DivX(\d+)Build(\d+)(\w)", d)
if o is None:
o = re.search(rb"DivX(\d+)b(\d+)(\w)", d)
if o is not None:
should_buffer = o.group(3) == b"p"
seek_offset = start_offset
# some files don't seek to the right location, so better be safe here
seek_offset = max(seek_offset - 1, 0)
if should_buffer:
# FIXME this is kind of a hack, but we will jump to the previous keyframe
# so this will be safe
seek_offset = max(seek_offset - max_buffer_size, 0)
try:
# TODO check if stream needs to always be the video stream here or not
container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
except FFmpegError:
# TODO add some warnings in this case
# print("Corrupted file?", container.name)
return []
buffer_count = 0
try:
for _idx, frame in enumerate(container.decode(**stream_name)):
frames[frame.pts] = frame
if frame.pts >= end_offset:
if should_buffer and buffer_count < max_buffer_size:
buffer_count += 1
continue
break
except FFmpegError:
# TODO add a warning
pass
# ensure that the results are sorted wrt the pts
result = [frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset]
if len(frames) > 0 and start_offset > 0 and start_offset not in frames:
# if there is no frame that exactly matches the pts of start_offset
# add the last frame smaller than start_offset, to guarantee that
# we will have all the necessary data. This is most useful for audio
preceding_frames = [i for i in frames if i < start_offset]
if len(preceding_frames) > 0:
first_frame_pts = max(preceding_frames)
result.insert(0, frames[first_frame_pts])
return result
def _align_audio_frames(
aframes: torch.Tensor, audio_frames: List["av.frame.Frame"], ref_start: int, ref_end: float
) -> torch.Tensor:
start, end = audio_frames[0].pts, audio_frames[-1].pts
total_aframes = aframes.shape[1]
step_per_aframe = (end - start + 1) / total_aframes
s_idx = 0
e_idx = total_aframes
if start < ref_start:
s_idx = int((ref_start - start) / step_per_aframe)
if end > ref_end:
e_idx = int((ref_end - end) / step_per_aframe)
return aframes[:, s_idx:e_idx]
def read_video(
filename: str,
start_pts: Union[float, Fraction] = 0,
end_pts: Optional[Union[float, Fraction]] = None,
pts_unit: str = "pts",
output_format: str = "THWC",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
"""[DEPRECATED] Reads a video from a file, returning both the video frames and the audio frames
.. warning::
DEPRECATED: All the video decoding and encoding capabilities of torchvision
are deprecated from version 0.22 and will be removed in version 0.24. We
recommend that you migrate to
`TorchCodec <https://github.com/pytorch/torchcodec>`__, where we'll
consolidate the future decoding/encoding capabilities of PyTorch
Args:
filename (str): path to the video file. If using the pyav backend, this can be whatever ``av.open`` accepts.
start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The start presentation time of the video
end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The end presentation time
pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted,
either 'pts' or 'sec'. Defaults to 'pts'.
output_format (str, optional): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
Returns:
vframes (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames
aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int)
"""
_raise_video_deprecation_warning()
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_video)
output_format = output_format.upper()
if output_format not in ("THWC", "TCHW"):
raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")
from torchvision import get_video_backend
if get_video_backend() != "pyav":
if not os.path.exists(filename):
raise RuntimeError(f"File not found: {filename}")
vframes, aframes, info = _video_opt._read_video(filename, start_pts, end_pts, pts_unit)
else:
_check_av_available()
if end_pts is None:
end_pts = float("inf")
if end_pts < start_pts:
raise ValueError(
f"end_pts should be larger than start_pts, got start_pts={start_pts} and end_pts={end_pts}"
)
info = {}
video_frames = []
audio_frames = []
audio_timebase = _video_opt.default_timebase
try:
with av.open(filename, metadata_errors="ignore") as container:
if container.streams.audio:
audio_timebase = container.streams.audio[0].time_base
if container.streams.video:
video_frames = _read_from_stream(
container,
start_pts,
end_pts,
pts_unit,
container.streams.video[0],
{"video": 0},
)
video_fps = container.streams.video[0].average_rate
# guard against potentially corrupted files
if video_fps is not None:
info["video_fps"] = float(video_fps)
if container.streams.audio:
audio_frames = _read_from_stream(
container,
start_pts,
end_pts,
pts_unit,
container.streams.audio[0],
{"audio": 0},
)
info["audio_fps"] = container.streams.audio[0].rate
except FFmpegError:
# TODO raise a warning?
pass
vframes_list = [frame.to_rgb().to_ndarray() for frame in video_frames]
aframes_list = [frame.to_ndarray() for frame in audio_frames]
if vframes_list:
vframes = torch.as_tensor(np.stack(vframes_list))
else:
vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8)
if aframes_list:
aframes = np.concatenate(aframes_list, 1)
aframes = torch.as_tensor(aframes)
if pts_unit == "sec":
start_pts = int(math.floor(start_pts * (1 / audio_timebase)))
if end_pts != float("inf"):
end_pts = int(math.ceil(end_pts * (1 / audio_timebase)))
aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts)
else:
aframes = torch.empty((1, 0), dtype=torch.float32)
if output_format == "TCHW":
# [T,H,W,C] --> [T,C,H,W]
vframes = vframes.permute(0, 3, 1, 2)
return vframes, aframes, info
def _can_read_timestamps_from_packets(container: "av.container.Container") -> bool:
extradata = container.streams[0].codec_context.extradata
if extradata is None:
return False
if b"Lavc" in extradata:
return True
return False
def _decode_video_timestamps(container: "av.container.Container") -> List[int]:
if _can_read_timestamps_from_packets(container):
# fast path
return [x.pts for x in container.demux(video=0) if x.pts is not None]
else:
return [x.pts for x in container.decode(video=0) if x.pts is not None]
def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[int], Optional[float]]:
"""[DEPREACTED] List the video frames timestamps.
.. warning::
DEPRECATED: All the video decoding and encoding capabilities of torchvision
are deprecated from version 0.22 and will be removed in version 0.24. We
recommend that you migrate to
`TorchCodec <https://github.com/pytorch/torchcodec>`__, where we'll
consolidate the future decoding/encoding capabilities of PyTorch
Note that the function decodes the whole video frame-by-frame.
Args:
filename (str): path to the video file
pts_unit (str, optional): unit in which timestamp values will be returned
either 'pts' or 'sec'. Defaults to 'pts'.
Returns:
pts (List[int] if pts_unit = 'pts', List[Fraction] if pts_unit = 'sec'):
presentation timestamps for each one of the frames in the video.
video_fps (float, optional): the frame rate for the video
"""
_raise_video_deprecation_warning()
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_video_timestamps)
from torchvision import get_video_backend
if get_video_backend() != "pyav":
return _video_opt._read_video_timestamps(filename, pts_unit)
_check_av_available()
video_fps = None
pts = []
try:
with av.open(filename, metadata_errors="ignore") as container:
if container.streams.video:
video_stream = container.streams.video[0]
video_time_base = video_stream.time_base
try:
pts = _decode_video_timestamps(container)
except FFmpegError:
warnings.warn(f"Failed decoding frames for file {filename}")
video_fps = float(video_stream.average_rate)
except FFmpegError as e:
msg = f"Failed to open container for {filename}; Caught error: {e}"
warnings.warn(msg, RuntimeWarning)
pts.sort()
if pts_unit == "sec":
pts = [x * video_time_base for x in pts]
return pts, video_fps
```
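A minimal sketch of the deprecated `read_video` API above, assuming a placeholder file `clip.mp4`; new code should prefer TorchCodec, as the docstrings note:

```py
from torchvision.io import read_video

# Decode video and audio between 0 s and 2 s, returning channel-first frames.
vframes, aframes, info = read_video(
    "clip.mp4", start_pts=0, end_pts=2, pts_unit="sec", output_format="TCHW"
)
print(vframes.shape)          # (T, 3, H, W), uint8
print(info.get("video_fps"))  # e.g. 29.97 if the container reports a frame rate
```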
|
======================================================================================================================
SOURCE CODE FILE: video_reader.py
LINES: 1
SIZE: 11.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\io\video_reader.py
ENCODING: utf-8
```py
import io
import warnings
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
from ._video_deprecation_warning import _raise_video_deprecation_warning
from ._video_opt import _HAS_CPU_VIDEO_DECODER
if _HAS_CPU_VIDEO_DECODER:
def _has_video_opt() -> bool:
return True
else:
def _has_video_opt() -> bool:
return False
try:
import av
av.logging.set_level(av.logging.ERROR)
if not hasattr(av.video.frame.VideoFrame, "pict_type"):
av = ImportError(
"""\
Your version of PyAV is too old for the necessary video operations in torchvision.
If you are on Python 3.5, you will have to build from source (the conda-forge
packages are not up-to-date). See
https://github.com/mikeboers/PyAV#installation for instructions on how to
install PyAV on your system.
"""
)
except ImportError:
av = ImportError(
"""\
PyAV is not installed, and is necessary for the video operations in torchvision.
See https://github.com/mikeboers/PyAV#installation for instructions on how to
install PyAV on your system.
"""
)
class VideoReader:
"""[DEPRECATED] Fine-grained video-reading API.
Supports frame-by-frame reading of various streams from a single video
    container. Much like the previous video_reader API, it supports the following
    backends: video_reader, pyav, and cuda.
    Backends can be set via the `torchvision.set_video_backend` function.
.. warning::
DEPRECATED: All the video decoding and encoding capabilities of torchvision
are deprecated from version 0.22 and will be removed in version 0.24. We
recommend that you migrate to
`TorchCodec <https://github.com/pytorch/torchcodec>`__, where we'll
consolidate the future decoding/encoding capabilities of PyTorch
.. betastatus:: VideoReader class
Example:
        The following example creates a :mod:`VideoReader` object, seeks to the 2s
        point, and returns a single frame::
import torchvision
video_path = "path_to_a_test_video"
reader = torchvision.io.VideoReader(video_path, "video")
reader.seek(2.0)
frame = next(reader)
    :mod:`VideoReader` implements the iterable API, which makes it suitable for
    use in conjunction with :mod:`itertools` for more advanced reading.
As such, we can use a :mod:`VideoReader` instance inside for loops::
reader.seek(2)
for frame in reader:
frames.append(frame['data'])
# additionally, `seek` implements a fluent API, so we can do
for frame in reader.seek(2):
frames.append(frame['data'])
With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
following code::
for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
frames.append(frame['data'])
and similarly, reading 10 frames after the 2s timestamp can be achieved
as follows::
for frame in itertools.islice(reader.seek(2), 10):
frames.append(frame['data'])
.. note::
Each stream descriptor consists of two parts: stream type (e.g. 'video') and
a unique stream id (which are determined by the video encoding).
In this way, if the video container contains multiple
streams of the same type, users can access the one they want.
If only stream type is passed, the decoder auto-detects first stream of that type.
Args:
src (string, bytes object, or tensor): The media source.
If string-type, it must be a file path supported by FFMPEG.
If bytes, should be an in-memory representation of a file supported by FFMPEG.
If Tensor, it is interpreted internally as byte buffer.
It must be one-dimensional, of type ``torch.uint8``.
stream (string, optional): descriptor of the required stream, followed by the stream id,
in the format ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``.
Currently available options include ``['video', 'audio']``
num_threads (int, optional): number of threads used by the codec to decode video.
Default value (0) enables multithreading with codec-dependent heuristic. The performance
will depend on the version of FFMPEG codecs supported.
"""
def __init__(
self,
src: str,
stream: str = "video",
num_threads: int = 0,
) -> None:
_raise_video_deprecation_warning()
_log_api_usage_once(self)
from .. import get_video_backend
self.backend = get_video_backend()
if isinstance(src, str):
if not src:
raise ValueError("src cannot be empty")
elif isinstance(src, bytes):
if self.backend in ["cuda"]:
raise RuntimeError(
"VideoReader cannot be initialized from bytes object when using cuda or pyav backend."
)
elif self.backend == "pyav":
src = io.BytesIO(src)
else:
with warnings.catch_warnings():
# Ignore the warning because we actually don't modify the buffer in this function
warnings.filterwarnings("ignore", message="The given buffer is not writable")
src = torch.frombuffer(src, dtype=torch.uint8)
elif isinstance(src, torch.Tensor):
if self.backend in ["cuda", "pyav"]:
raise RuntimeError(
"VideoReader cannot be initialized from Tensor object when using cuda or pyav backend."
)
else:
raise ValueError(f"src must be either string, Tensor or bytes object. Got {type(src)}")
if self.backend == "cuda":
device = torch.device("cuda")
self._c = torch.classes.torchvision.GPUDecoder(src, device)
elif self.backend == "video_reader":
if isinstance(src, str):
self._c = torch.classes.torchvision.Video(src, stream, num_threads)
elif isinstance(src, torch.Tensor):
self._c = torch.classes.torchvision.Video("", "", 0)
self._c.init_from_memory(src, stream, num_threads)
elif self.backend == "pyav":
self.container = av.open(src, metadata_errors="ignore")
# TODO: load metadata
stream_type = stream.split(":")[0]
stream_id = 0 if len(stream.split(":")) == 1 else int(stream.split(":")[1])
self.pyav_stream = {stream_type: stream_id}
self._c = self.container.decode(**self.pyav_stream)
# TODO: add extradata exception
else:
raise RuntimeError("Unknown video backend: {}".format(self.backend))
def __next__(self) -> Dict[str, Any]:
"""Decodes and returns the next frame of the current stream.
Frames are encoded as a dict with mandatory
data and pts fields, where data is a tensor, and pts is a
presentation timestamp of the frame expressed in seconds
as a float.
Returns:
            (dict): a dictionary containing the decoded frame (``data``)
            and the corresponding timestamp (``pts``) in seconds
"""
if self.backend == "cuda":
frame = self._c.next()
if frame.numel() == 0:
raise StopIteration
return {"data": frame, "pts": None}
elif self.backend == "video_reader":
frame, pts = self._c.next()
else:
try:
frame = next(self._c)
pts = float(frame.pts * frame.time_base)
if "video" in self.pyav_stream:
frame = torch.as_tensor(frame.to_rgb().to_ndarray()).permute(2, 0, 1)
elif "audio" in self.pyav_stream:
frame = torch.as_tensor(frame.to_ndarray()).permute(1, 0)
else:
frame = None
except av.error.EOFError:
raise StopIteration
if frame.numel() == 0:
raise StopIteration
return {"data": frame, "pts": pts}
def __iter__(self) -> Iterator[Dict[str, Any]]:
return self
def seek(self, time_s: float, keyframes_only: bool = False) -> "VideoReader":
"""Seek within current stream.
Args:
time_s (float): seek time in seconds
            keyframes_only (bool): allow seeking only to keyframes
        .. note::
            The current implementation performs a so-called precise seek. This
            means that, following a seek, a call to :mod:`next()` will return the
            frame with the exact timestamp, if it exists, or
            the first frame with a timestamp larger than ``time_s``.
"""
if self.backend in ["cuda", "video_reader"]:
self._c.seek(time_s, keyframes_only)
else:
# handle special case as pyav doesn't catch it
if time_s < 0:
time_s = 0
temp_str = self.container.streams.get(**self.pyav_stream)[0]
offset = int(round(time_s / temp_str.time_base))
if not keyframes_only:
warnings.warn("Accurate seek is not implemented for pyav backend")
self.container.seek(offset, backward=True, any_frame=False, stream=temp_str)
self._c = self.container.decode(**self.pyav_stream)
return self
def get_metadata(self) -> Dict[str, Any]:
"""Returns video metadata
Returns:
(dict): dictionary containing duration and frame rate for every stream
"""
if self.backend == "pyav":
metadata = {} # type: Dict[str, Any]
for stream in self.container.streams:
if stream.type not in metadata:
if stream.type == "video":
rate_n = "fps"
else:
rate_n = "framerate"
metadata[stream.type] = {rate_n: [], "duration": []}
rate = getattr(stream, "average_rate", None) or stream.sample_rate
metadata[stream.type]["duration"].append(float(stream.duration * stream.time_base))
metadata[stream.type][rate_n].append(float(rate))
return metadata
return self._c.get_metadata()
def set_current_stream(self, stream: str) -> bool:
"""Set current stream.
Explicitly define the stream we are operating on.
Args:
stream (string): descriptor of the required stream. Defaults to ``"video:0"``
Currently available stream types include ``['video', 'audio']``.
Each descriptor consists of two parts: stream type (e.g. 'video') and
a unique stream id (which are determined by video encoding).
In this way, if the video container contains multiple
streams of the same type, users can access the one they want.
If only stream type is passed, the decoder auto-detects first stream
of that type and returns it.
Returns:
(bool): True on success, False otherwise
"""
if self.backend == "cuda":
warnings.warn("GPU decoding only works with video stream.")
if self.backend == "pyav":
stream_type = stream.split(":")[0]
stream_id = 0 if len(stream.split(":")) == 1 else int(stream.split(":")[1])
self.pyav_stream = {stream_type: stream_id}
self._c = self.container.decode(**self.pyav_stream)
return True
return self._c.set_current_stream(stream)
```
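A short sketch of the `VideoReader` iteration and seek API shown above, again with a placeholder path and the default backend:

```py
import itertools

import torchvision

reader = torchvision.io.VideoReader("clip.mp4", "video")
print(reader.get_metadata())  # per-stream duration and frame rate

# Fluent seek + iteration: grab ten decoded frames starting around the 2 s mark.
frames = [frame["data"] for frame in itertools.islice(reader.seek(2.0), 10)]
```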
|
======================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\__init__.py
ENCODING: utf-8
```py
from .alexnet import *
from .convnext import *
from .densenet import *
from .efficientnet import *
from .googlenet import *
from .inception import *
from .mnasnet import *
from .mobilenet import *
from .regnet import *
from .resnet import *
from .shufflenetv2 import *
from .squeezenet import *
from .vgg import *
from .vision_transformer import *
from .swin_transformer import *
from .maxvit import *
from . import detection, optical_flow, quantization, segmentation, video
# The Weights and WeightsEnum are developer-facing utils that we make public for
# downstream libs like torchgeo https://github.com/pytorch/vision/issues/7094
# TODO: we could / should document them publicly, but it's not clear where, as
# they're not intended for end users.
from ._api import get_model, get_model_builder, get_model_weights, get_weight, list_models, Weights, WeightsEnum
```
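A small sketch of the public registration API re-exported above, using `resnet18` as the example model name:

```py
from torchvision.models import get_model, get_model_weights, list_models

print(list_models(include="resnet*"))  # registered names matching the wildcard

# Resolve the weights enum for a builder, then instantiate the model with it.
weights = get_model_weights("resnet18").DEFAULT
model = get_model("resnet18", weights=weights).eval()
```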
|
==================================================================================================================
SOURCE CODE FILE: _api.py
LINES: 1
SIZE: 10.01 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\_api.py
ENCODING: utf-8
```py
import fnmatch
import importlib
import inspect
import sys
from dataclasses import dataclass
from enum import Enum
from functools import partial
from inspect import signature
from types import ModuleType
from typing import Any, Callable, Dict, get_args, Iterable, List, Mapping, Optional, Set, Type, TypeVar, Union
from torch import nn
from .._internally_replaced_utils import load_state_dict_from_url
__all__ = ["WeightsEnum", "Weights", "get_model", "get_model_builder", "get_model_weights", "get_weight", "list_models"]
@dataclass
class Weights:
"""
This class is used to group important attributes associated with the pre-trained weights.
Args:
url (str): The location where we find the weights.
transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)
            needed to use the model. The reason we attach a constructor method rather than an already constructed
            object is that the constructed object might consume memory, so we want to delay initialization until it
            is needed.
meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be
informative attributes (for example the number of parameters/flops, recipe link/methods used in training
etc), configuration parameters (for example the `num_classes`) needed to construct the model or important
meta-data (for example the `classes` of a classification model) needed to use the model.
"""
url: str
transforms: Callable
meta: Dict[str, Any]
def __eq__(self, other: Any) -> bool:
# We need this custom implementation for correct deep-copy and deserialization behavior.
# TL;DR: After the definition of an enum, creating a new instance, i.e. by deep-copying or deserializing it,
# involves an equality check against the defined members. Unfortunately, the `transforms` attribute is often
# defined with `functools.partial` and `fn = partial(...); assert deepcopy(fn) != fn`. Without custom handling
# for it, the check against the defined members would fail and effectively prevent the weights from being
# deep-copied or deserialized.
# See https://github.com/pytorch/vision/pull/7107 for details.
if not isinstance(other, Weights):
return NotImplemented
if self.url != other.url:
return False
if self.meta != other.meta:
return False
if isinstance(self.transforms, partial) and isinstance(other.transforms, partial):
return (
self.transforms.func == other.transforms.func
and self.transforms.args == other.transforms.args
and self.transforms.keywords == other.transforms.keywords
)
else:
return self.transforms == other.transforms
class WeightsEnum(Enum):
"""
This class is the parent class of all model weights. Each model building method receives an optional `weights`
parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type
`Weights`.
Args:
value (Weights): The data class entry with the weight information.
"""
@classmethod
def verify(cls, obj: Any) -> Any:
if obj is not None:
if type(obj) is str:
obj = cls[obj.replace(cls.__name__ + ".", "")]
elif not isinstance(obj, cls):
raise TypeError(
f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}."
)
return obj
def get_state_dict(self, *args: Any, **kwargs: Any) -> Mapping[str, Any]:
return load_state_dict_from_url(self.url, *args, **kwargs)
def __repr__(self) -> str:
return f"{self.__class__.__name__}.{self._name_}"
@property
def url(self):
return self.value.url
@property
def transforms(self):
return self.value.transforms
@property
def meta(self):
return self.value.meta
def get_weight(name: str) -> WeightsEnum:
"""
Gets the weights enum value by its full name. Example: "ResNet50_Weights.IMAGENET1K_V1"
Args:
name (str): The name of the weight enum entry.
Returns:
WeightsEnum: The requested weight enum.
"""
try:
enum_name, value_name = name.split(".")
except ValueError:
raise ValueError(f"Invalid weight name provided: '{name}'.")
base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
base_module = importlib.import_module(base_module_name)
model_modules = [base_module] + [
x[1]
for x in inspect.getmembers(base_module, inspect.ismodule)
if x[1].__file__.endswith("__init__.py") # type: ignore[union-attr]
]
weights_enum = None
for m in model_modules:
potential_class = m.__dict__.get(enum_name, None)
if potential_class is not None and issubclass(potential_class, WeightsEnum):
weights_enum = potential_class
break
if weights_enum is None:
raise ValueError(f"The weight enum '{enum_name}' for the specific method couldn't be retrieved.")
return weights_enum[value_name]
def get_model_weights(name: Union[Callable, str]) -> Type[WeightsEnum]:
"""
Returns the weights enum class associated to the given model.
Args:
name (callable or str): The model builder function or the name under which it is registered.
Returns:
weights_enum (WeightsEnum): The weights enum class associated with the model.
"""
model = get_model_builder(name) if isinstance(name, str) else name
return _get_enum_from_fn(model)
def _get_enum_from_fn(fn: Callable) -> Type[WeightsEnum]:
"""
Internal method that gets the weight enum of a specific model builder method.
Args:
fn (Callable): The builder method used to create the model.
Returns:
WeightsEnum: The requested weight enum.
"""
sig = signature(fn)
if "weights" not in sig.parameters:
raise ValueError("The method is missing the 'weights' argument.")
ann = sig.parameters["weights"].annotation
weights_enum = None
if isinstance(ann, type) and issubclass(ann, WeightsEnum):
weights_enum = ann
else:
# handle cases like Union[Optional, T]
for t in get_args(ann): # type: ignore[union-attr]
if isinstance(t, type) and issubclass(t, WeightsEnum):
weights_enum = t
break
if weights_enum is None:
raise ValueError(
"The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct."
)
return weights_enum
M = TypeVar("M", bound=nn.Module)
BUILTIN_MODELS = {}
def register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:
def wrapper(fn: Callable[..., M]) -> Callable[..., M]:
key = name if name is not None else fn.__name__
if key in BUILTIN_MODELS:
raise ValueError(f"An entry is already registered under the name '{key}'.")
BUILTIN_MODELS[key] = fn
return fn
return wrapper
def list_models(
module: Optional[ModuleType] = None,
include: Union[Iterable[str], str, None] = None,
exclude: Union[Iterable[str], str, None] = None,
) -> List[str]:
"""
Returns a list with the names of registered models.
Args:
module (ModuleType, optional): The module from which we want to extract the available models.
include (str or Iterable[str], optional): Filter(s) for including the models from the set of all models.
Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
            wildcards. In case of many filters, the result is the union of the individual filters.
        exclude (str or Iterable[str], optional): Filter(s) applied after the ``include`` filters to remove models.
            Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
            wildcards. In case of many filters, the result is the removal of all the models that match any individual filter.
Returns:
models (list): A list with the names of available models.
"""
all_models = {
k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__.rsplit(".", 1)[0] == module.__name__
}
if include:
models: Set[str] = set()
if isinstance(include, str):
include = [include]
for include_filter in include:
models = models | set(fnmatch.filter(all_models, include_filter))
else:
models = all_models
if exclude:
if isinstance(exclude, str):
exclude = [exclude]
for exclude_filter in exclude:
models = models - set(fnmatch.filter(all_models, exclude_filter))
return sorted(models)
def get_model_builder(name: str) -> Callable[..., nn.Module]:
"""
Gets the model name and returns the model builder method.
Args:
name (str): The name under which the model is registered.
Returns:
fn (Callable): The model builder method.
"""
name = name.lower()
try:
fn = BUILTIN_MODELS[name]
except KeyError:
raise ValueError(f"Unknown model {name}")
return fn
def get_model(name: str, **config: Any) -> nn.Module:
"""
Gets the model name and configuration and returns an instantiated model.
Args:
name (str): The name under which the model is registered.
**config (Any): parameters passed to the model builder method.
Returns:
model (nn.Module): The initialized model.
"""
fn = get_model_builder(name)
return fn(**config)
```
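And a hedged sketch of how the registry in `_api.py` can be extended with a custom builder; `tiny_mlp` is a hypothetical name, not an actual torchvision model:

```py
from torch import nn

from torchvision.models._api import get_model, list_models, register_model

@register_model()  # registered under the builder's function name, "tiny_mlp"
def tiny_mlp(num_classes: int = 10) -> nn.Module:
    # Hypothetical toy builder used purely to illustrate the registry.
    return nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, num_classes))

assert "tiny_mlp" in list_models()
model = get_model("tiny_mlp", num_classes=5)
```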
|
===================================================================================================================
SOURCE CODE FILE: _meta.py
LINES: 1
SIZE: 29.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\_meta.py
ENCODING: utf-8
```py
"""
This file is part of the private API. Please do not refer to any variables defined here directly as they will be
removed in future versions without warning.
"""
# This will eventually be replaced with a call at torchvision.datasets.info("imagenet").categories
_IMAGENET_CATEGORIES = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead",
"electric ray",
"stingray",
"cock",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel",
"kite",
"bald eagle",
"vulture",
"great grey owl",
"European fire salamander",
"common newt",
"eft",
"spotted salamander",
"axolotl",
"bullfrog",
"tree frog",
"tailed frog",
"loggerhead",
"leatherback turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"common iguana",
"American chameleon",
"whiptail",
"agama",
"frilled lizard",
"alligator lizard",
"Gila monster",
"green lizard",
"African chameleon",
"Komodo dragon",
"African crocodile",
"American alligator",
"triceratops",
"thunder snake",
"ringneck snake",
"hognose snake",
"green snake",
"king snake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"rock python",
"Indian cobra",
"green mamba",
"sea snake",
"horned viper",
"diamondback",
"sidewinder",
"trilobite",
"harvestman",
"scorpion",
"black and gold garden spider",
"barn spider",
"garden spider",
"black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie chicken",
"peacock",
"quail",
"partridge",
"African grey",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"American egret",
"bittern",
"crane bird",
"limpkin",
"European gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"red-backed sandpiper",
"redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog",
"Pekinese",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound",
"basset",
"beagle",
"bloodhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound",
"English foxhound",
"redbone",
"borzoi",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound",
"Norwegian elkhound",
"otterhound",
"Saluki",
"Scottish deerhound",
"Weimaraner",
"Staffordshire bullterrier",
"American Staffordshire terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier",
"Airedale",
"cairn",
"Australian terrier",
"Dandie Dinmont",
"Boston bull",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier",
"Tibetan terrier",
"silky terrier",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla",
"English setter",
"Irish setter",
"Gordon setter",
"Brittany spaniel",
"clumber",
"English springer",
"Welsh springer spaniel",
"cocker spaniel",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog",
"Shetland sheepdog",
"collie",
"Border collie",
"Bouvier des Flandres",
"Rottweiler",
"German shepherd",
"Doberman",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard",
"Eskimo dog",
"malamute",
"Siberian husky",
"dalmatian",
"affenpinscher",
"basenji",
"pug",
"Leonberg",
"Newfoundland",
"Great Pyrenees",
"Samoyed",
"Pomeranian",
"chow",
"keeshond",
"Brabancon griffon",
"Pembroke",
"Cardigan",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf",
"white wolf",
"red wolf",
"coyote",
"dingo",
"dhole",
"African hunting dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian cat",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"ice bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"long-horned beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket",
"walking stick",
"cockroach",
"mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"admiral",
"ringlet",
"monarch",
"cabbage butterfly",
"sulphur butterfly",
"lycaenid",
"starfish",
"sea urchin",
"sea cucumber",
"wood rabbit",
"hare",
"Angora",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"sorrel",
"zebra",
"hog",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram",
"bighorn",
"ibex",
"hartebeest",
"impala",
"gazelle",
"Arabian camel",
"llama",
"weasel",
"mink",
"polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas",
"baboon",
"macaque",
"langur",
"colobus",
"proboscis monkey",
"marmoset",
"capuchin",
"howler monkey",
"titi",
"spider monkey",
"squirrel monkey",
"Madagascar cat",
"indri",
"Indian elephant",
"African elephant",
"lesser panda",
"giant panda",
"barracouta",
"eel",
"coho",
"rock beauty",
"anemone fish",
"sturgeon",
"gar",
"lionfish",
"puffer",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibian",
"analog clock",
"apiary",
"apron",
"ashcan",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint",
"Band Aid",
"banjo",
"bannister",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"barrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap",
"bath towel",
"bathtub",
"beach wagon",
"beacon",
"beaker",
"bearskin",
"beer bottle",
"beer glass",
"bell cote",
"bib",
"bicycle-built-for-two",
"bikini",
"binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsled",
"bolo tie",
"bonnet",
"bookcase",
"bookshop",
"bottlecap",
"bow",
"bow tie",
"brass",
"brassiere",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"bullet train",
"butcher shop",
"cab",
"caldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"carpenter's kit",
"carton",
"car wheel",
"cash machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"cellular telephone",
"chain",
"chainlink fence",
"chain mail",
"chain saw",
"chest",
"chiffonier",
"chime",
"china cabinet",
"Christmas stocking",
"church",
"cinema",
"cleaver",
"cliff dwelling",
"cloak",
"clog",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil",
"combination lock",
"computer keyboard",
"confectionery",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishrag",
"dishwasher",
"disk brake",
"dock",
"dogsled",
"dome",
"doormat",
"drilling platform",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa",
"file",
"fireboat",
"fire engine",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gasmask",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golfcart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"grille",
"grocery store",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower",
"hand-held computer",
"handkerchief",
"hard disc",
"harmonica",
"harp",
"harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoopskirt",
"horizontal bar",
"horse cart",
"hourglass",
"iPod",
"iron",
"jack-o'-lantern",
"jean",
"jeep",
"jersey",
"jigsaw puzzle",
"jinrikisha",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"liner",
"lipstick",
"Loafer",
"lotion",
"loudspeaker",
"loupe",
"lumbermill",
"magnetic compass",
"mailbag",
"mailbox",
"maillot",
"maillot tank suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine chest",
"megalith",
"microphone",
"microwave",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter",
"mountain bike",
"mountain tent",
"mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"organ",
"oscilloscope",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle",
"paddlewheel",
"padlock",
"paintbrush",
"pajama",
"palace",
"panpipe",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"passenger car",
"patio",
"pay-phone",
"pedestal",
"pencil box",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"pick",
"pickelhaube",
"picket fence",
"pickup",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate",
"pitcher",
"plane",
"planetarium",
"plastic bag",
"plate rack",
"plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"pop bottle",
"pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"projectile",
"projector",
"puck",
"punching bag",
"purse",
"quill",
"quilt",
"racer",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"rubber eraser",
"rugby ball",
"rule",
"running shoe",
"safe",
"safety pin",
"saltshaker",
"sandal",
"sarong",
"sax",
"scabbard",
"scale",
"school bus",
"schooner",
"scoreboard",
"screen",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe shop",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar dish",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch",
"stove",
"strainer",
"streetcar",
"stretcher",
"studio couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglass",
"sunglasses",
"sunscreen",
"suspension bridge",
"swab",
"sweatshirt",
"swimming trunks",
"swing",
"switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy",
"television",
"tennis ball",
"thatch",
"theater curtain",
"thimble",
"thresher",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toyshop",
"tractor",
"trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright",
"vacuum",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"warplane",
"washbasin",
"washer",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool",
"worm fence",
"wreck",
"yawl",
"yurt",
"web site",
"comic book",
"crossword puzzle",
"street sign",
"traffic light",
"book jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"ice lolly",
"French loaf",
"bagel",
"pretzel",
"cheeseburger",
"hotdog",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce",
"dough",
"meat loaf",
"pizza",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeside",
"promontory",
"sandbar",
"seashore",
"valley",
"volcano",
"ballplayer",
"groom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"hip",
"buckeye",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn",
"earthstar",
"hen-of-the-woods",
"bolete",
"ear",
"toilet tissue",
]
# To be replaced with torchvision.datasets.info("coco").categories
_COCO_CATEGORIES = [
"__background__",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"N/A",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"N/A",
"backpack",
"umbrella",
"N/A",
"N/A",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"N/A",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"N/A",
"dining table",
"N/A",
"N/A",
"toilet",
"N/A",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"N/A",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
# To be replaced with torchvision.datasets.info("coco_kp")
_COCO_PERSON_CATEGORIES = ["no person", "person"]
_COCO_PERSON_KEYPOINT_NAMES = [
"nose",
"left_eye",
"right_eye",
"left_ear",
"right_ear",
"left_shoulder",
"right_shoulder",
"left_elbow",
"right_elbow",
"left_wrist",
"right_wrist",
"left_hip",
"right_hip",
"left_knee",
"right_knee",
"left_ankle",
"right_ankle",
]
# To be replaced with torchvision.datasets.info("voc").categories
_VOC_CATEGORIES = [
"__background__",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
# To be replaced with torchvision.datasets.info("kinetics400").categories
_KINETICS400_CATEGORIES = [
"abseiling",
"air drumming",
"answering questions",
"applauding",
"applying cream",
"archery",
"arm wrestling",
"arranging flowers",
"assembling computer",
"auctioning",
"baby waking up",
"baking cookies",
"balloon blowing",
"bandaging",
"barbequing",
"bartending",
"beatboxing",
"bee keeping",
"belly dancing",
"bench pressing",
"bending back",
"bending metal",
"biking through snow",
"blasting sand",
"blowing glass",
"blowing leaves",
"blowing nose",
"blowing out candles",
"bobsledding",
"bookbinding",
"bouncing on trampoline",
"bowling",
"braiding hair",
"breading or breadcrumbing",
"breakdancing",
"brush painting",
"brushing hair",
"brushing teeth",
"building cabinet",
"building shed",
"bungee jumping",
"busking",
"canoeing or kayaking",
"capoeira",
"carrying baby",
"cartwheeling",
"carving pumpkin",
"catching fish",
"catching or throwing baseball",
"catching or throwing frisbee",
"catching or throwing softball",
"celebrating",
"changing oil",
"changing wheel",
"checking tires",
"cheerleading",
"chopping wood",
"clapping",
"clay pottery making",
"clean and jerk",
"cleaning floor",
"cleaning gutters",
"cleaning pool",
"cleaning shoes",
"cleaning toilet",
"cleaning windows",
"climbing a rope",
"climbing ladder",
"climbing tree",
"contact juggling",
"cooking chicken",
"cooking egg",
"cooking on campfire",
"cooking sausages",
"counting money",
"country line dancing",
"cracking neck",
"crawling baby",
"crossing river",
"crying",
"curling hair",
"cutting nails",
"cutting pineapple",
"cutting watermelon",
"dancing ballet",
"dancing charleston",
"dancing gangnam style",
"dancing macarena",
"deadlifting",
"decorating the christmas tree",
"digging",
"dining",
"disc golfing",
"diving cliff",
"dodgeball",
"doing aerobics",
"doing laundry",
"doing nails",
"drawing",
"dribbling basketball",
"drinking",
"drinking beer",
"drinking shots",
"driving car",
"driving tractor",
"drop kicking",
"drumming fingers",
"dunking basketball",
"dying hair",
"eating burger",
"eating cake",
"eating carrots",
"eating chips",
"eating doughnuts",
"eating hotdog",
"eating ice cream",
"eating spaghetti",
"eating watermelon",
"egg hunting",
"exercising arm",
"exercising with an exercise ball",
"extinguishing fire",
"faceplanting",
"feeding birds",
"feeding fish",
"feeding goats",
"filling eyebrows",
"finger snapping",
"fixing hair",
"flipping pancake",
"flying kite",
"folding clothes",
"folding napkins",
"folding paper",
"front raises",
"frying vegetables",
"garbage collecting",
"gargling",
"getting a haircut",
"getting a tattoo",
"giving or receiving award",
"golf chipping",
"golf driving",
"golf putting",
"grinding meat",
"grooming dog",
"grooming horse",
"gymnastics tumbling",
"hammer throw",
"headbanging",
"headbutting",
"high jump",
"high kick",
"hitting baseball",
"hockey stop",
"holding snake",
"hopscotch",
"hoverboarding",
"hugging",
"hula hooping",
"hurdling",
"hurling (sport)",
"ice climbing",
"ice fishing",
"ice skating",
"ironing",
"javelin throw",
"jetskiing",
"jogging",
"juggling balls",
"juggling fire",
"juggling soccer ball",
"jumping into pool",
"jumpstyle dancing",
"kicking field goal",
"kicking soccer ball",
"kissing",
"kitesurfing",
"knitting",
"krumping",
"laughing",
"laying bricks",
"long jump",
"lunge",
"making a cake",
"making a sandwich",
"making bed",
"making jewelry",
"making pizza",
"making snowman",
"making sushi",
"making tea",
"marching",
"massaging back",
"massaging feet",
"massaging legs",
"massaging person's head",
"milking cow",
"mopping floor",
"motorcycling",
"moving furniture",
"mowing lawn",
"news anchoring",
"opening bottle",
"opening present",
"paragliding",
"parasailing",
"parkour",
"passing American football (in game)",
"passing American football (not in game)",
"peeling apples",
"peeling potatoes",
"petting animal (not cat)",
"petting cat",
"picking fruit",
"planting trees",
"plastering",
"playing accordion",
"playing badminton",
"playing bagpipes",
"playing basketball",
"playing bass guitar",
"playing cards",
"playing cello",
"playing chess",
"playing clarinet",
"playing controller",
"playing cricket",
"playing cymbals",
"playing didgeridoo",
"playing drums",
"playing flute",
"playing guitar",
"playing harmonica",
"playing harp",
"playing ice hockey",
"playing keyboard",
"playing kickball",
"playing monopoly",
"playing organ",
"playing paintball",
"playing piano",
"playing poker",
"playing recorder",
"playing saxophone",
"playing squash or racquetball",
"playing tennis",
"playing trombone",
"playing trumpet",
"playing ukulele",
"playing violin",
"playing volleyball",
"playing xylophone",
"pole vault",
"presenting weather forecast",
"pull ups",
"pumping fist",
"pumping gas",
"punching bag",
"punching person (boxing)",
"push up",
"pushing car",
"pushing cart",
"pushing wheelchair",
"reading book",
"reading newspaper",
"recording music",
"riding a bike",
"riding camel",
"riding elephant",
"riding mechanical bull",
"riding mountain bike",
"riding mule",
"riding or walking with horse",
"riding scooter",
"riding unicycle",
"ripping paper",
"robot dancing",
"rock climbing",
"rock scissors paper",
"roller skating",
"running on treadmill",
"sailing",
"salsa dancing",
"sanding floor",
"scrambling eggs",
"scuba diving",
"setting table",
"shaking hands",
"shaking head",
"sharpening knives",
"sharpening pencil",
"shaving head",
"shaving legs",
"shearing sheep",
"shining shoes",
"shooting basketball",
"shooting goal (soccer)",
"shot put",
"shoveling snow",
"shredding paper",
"shuffling cards",
"side kick",
"sign language interpreting",
"singing",
"situp",
"skateboarding",
"ski jumping",
"skiing (not slalom or crosscountry)",
"skiing crosscountry",
"skiing slalom",
"skipping rope",
"skydiving",
"slacklining",
"slapping",
"sled dog racing",
"smoking",
"smoking hookah",
"snatch weight lifting",
"sneezing",
"sniffing",
"snorkeling",
"snowboarding",
"snowkiting",
"snowmobiling",
"somersaulting",
"spinning poi",
"spray painting",
"spraying",
"springboard diving",
"squat",
"sticking tongue out",
"stomping grapes",
"stretching arm",
"stretching leg",
"strumming guitar",
"surfing crowd",
"surfing water",
"sweeping floor",
"swimming backstroke",
"swimming breast stroke",
"swimming butterfly stroke",
"swing dancing",
"swinging legs",
"swinging on something",
"sword fighting",
"tai chi",
"taking a shower",
"tango dancing",
"tap dancing",
"tapping guitar",
"tapping pen",
"tasting beer",
"tasting food",
"testifying",
"texting",
"throwing axe",
"throwing ball",
"throwing discus",
"tickling",
"tobogganing",
"tossing coin",
"tossing salad",
"training dog",
"trapezing",
"trimming or shaving beard",
"trimming trees",
"triple jump",
"tying bow tie",
"tying knot (not on a tie)",
"tying tie",
"unboxing",
"unloading truck",
"using computer",
"using remote controller (not gaming)",
"using segway",
"vault",
"waiting in line",
"walking the dog",
"washing dishes",
"washing feet",
"washing hair",
"washing hands",
"water skiing",
"water sliding",
"watering plants",
"waxing back",
"waxing chest",
"waxing eyebrows",
"waxing legs",
"weaving basket",
"welding",
"whistling",
"windsurfing",
"wrapping present",
"wrestling",
"writing",
"yawning",
"yoga",
"zumba",
]
```
|
====================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 10.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\_utils.py
ENCODING: utf-8
```py
import functools
import inspect
import warnings
from collections import OrderedDict
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, Union
from torch import nn
from .._utils import sequence_to_str
from ._api import WeightsEnum
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Args:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(weights=ResNet18_Weights.DEFAULT)
>>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
_version = 2
__annotations__ = {
"return_layers": Dict[str, str],
}
def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None:
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {str(k): str(v) for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super().__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
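# Worked examples: _make_divisible(37.6, 8) == 40 and _make_divisible(32, 8) == 32, while
# _make_divisible(10, 8) == 16 because rounding down to 8 would shrink the value by more than 10%.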
D = TypeVar("D")
def kwonly_to_pos_or_kw(fn: Callable[..., D]) -> Callable[..., D]:
"""Decorates a function that uses keyword only parameters to also allow them being passed as positionals.
For example, consider the use case of changing the signature of ``old_fn`` into the one from ``new_fn``:
.. code::
def old_fn(foo, bar, baz=None):
...
def new_fn(foo, *, bar, baz=None):
...
    Calling ``old_fn("foo", "bar", "baz")`` was valid, but the same call is no longer valid with ``new_fn``. To keep BC
and at the same time warn the user of the deprecation, this decorator can be used:
.. code::
@kwonly_to_pos_or_kw
def new_fn(foo, *, bar, baz=None):
...
new_fn("foo", "bar, "baz")
"""
params = inspect.signature(fn).parameters
try:
keyword_only_start_idx = next(
idx for idx, param in enumerate(params.values()) if param.kind == param.KEYWORD_ONLY
)
except StopIteration:
raise TypeError(f"Found no keyword-only parameter on function '{fn.__name__}'") from None
keyword_only_params = tuple(inspect.signature(fn).parameters)[keyword_only_start_idx:]
@functools.wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> D:
args, keyword_only_args = args[:keyword_only_start_idx], args[keyword_only_start_idx:]
if keyword_only_args:
keyword_only_kwargs = dict(zip(keyword_only_params, keyword_only_args))
warnings.warn(
f"Using {sequence_to_str(tuple(keyword_only_kwargs.keys()), separate_last='and ')} as positional "
f"parameter(s) is deprecated since 0.13 and may be removed in the future. Please use keyword parameter(s) "
f"instead."
)
kwargs.update(keyword_only_kwargs)
return fn(*args, **kwargs)
return wrapper
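# Usage sketch with a hypothetical builder (mirrors the docstring example above):
#   @kwonly_to_pos_or_kw
#   def new_fn(foo, *, bar, baz=None):
#       return foo, bar, baz
#   new_fn("foo", "bar", "baz")  # still works, but warns that 'bar' and 'baz' are now keyword-only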
W = TypeVar("W", bound=WeightsEnum)
M = TypeVar("M", bound=nn.Module)
V = TypeVar("V")
def handle_legacy_interface(**weights: Tuple[str, Union[Optional[W], Callable[[Dict[str, Any]], Optional[W]]]]):
"""Decorates a model builder with the new interface to make it compatible with the old.
In particular this handles two things:
1. Allows positional parameters again, but emits a deprecation warning in case they are used. See
:func:`torchvision.prototype.utils._internal.kwonly_to_pos_or_kw` for details.
2. Handles the default value change from ``pretrained=False`` to ``weights=None`` and ``pretrained=True`` to
``weights=Weights`` and emits a deprecation warning with instructions for the new interface.
Args:
**weights (Tuple[str, Union[Optional[W], Callable[[Dict[str, Any]], Optional[W]]]]): Deprecated parameter
name and default value for the legacy ``pretrained=True``. The default value can be a callable in which
case it will be called with a dictionary of the keyword arguments. The only key that is guaranteed to be in
the dictionary is the deprecated parameter name passed as first element in the tuple. All other parameters
should be accessed with :meth:`~dict.get`.
"""
def outer_wrapper(builder: Callable[..., M]) -> Callable[..., M]:
@kwonly_to_pos_or_kw
@functools.wraps(builder)
def inner_wrapper(*args: Any, **kwargs: Any) -> M:
for weights_param, (pretrained_param, default) in weights.items(): # type: ignore[union-attr]
            # If neither the weights nor the pretrained parameter was passed, or the weights argument already uses
# the new style arguments, there is nothing to do. Note that we cannot use `None` as sentinel for the
# weight argument, since it is a valid value.
sentinel = object()
weights_arg = kwargs.get(weights_param, sentinel)
if (
(weights_param not in kwargs and pretrained_param not in kwargs)
or isinstance(weights_arg, WeightsEnum)
or (isinstance(weights_arg, str) and weights_arg != "legacy")
or weights_arg is None
):
continue
# If the pretrained parameter was passed as positional argument, it is now mapped to
# `kwargs[weights_param]`. This happens because the @kwonly_to_pos_or_kw decorator uses the current
# signature to infer the names of positionally passed arguments and thus has no knowledge that there
# used to be a pretrained parameter.
pretrained_positional = weights_arg is not sentinel
if pretrained_positional:
# We put the pretrained argument under its legacy name in the keyword argument dictionary to have
# unified access to the value if the default value is a callable.
kwargs[pretrained_param] = pretrained_arg = kwargs.pop(weights_param)
else:
pretrained_arg = kwargs[pretrained_param]
if pretrained_arg:
default_weights_arg = default(kwargs) if callable(default) else default
if not isinstance(default_weights_arg, WeightsEnum):
raise ValueError(f"No weights available for model {builder.__name__}")
else:
default_weights_arg = None
if not pretrained_positional:
warnings.warn(
f"The parameter '{pretrained_param}' is deprecated since 0.13 and may be removed in the future, "
f"please use '{weights_param}' instead."
)
msg = (
f"Arguments other than a weight enum or `None` for '{weights_param}' are deprecated since 0.13 and "
f"may be removed in the future. "
f"The current behavior is equivalent to passing `{weights_param}={default_weights_arg}`."
)
if pretrained_arg:
msg = (
f"{msg} You can also use `{weights_param}={type(default_weights_arg).__name__}.DEFAULT` "
f"to get the most up-to-date weights."
)
warnings.warn(msg)
del kwargs[pretrained_param]
kwargs[weights_param] = default_weights_arg
return builder(*args, **kwargs)
return inner_wrapper
return outer_wrapper
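# Usage sketch with a hypothetical builder and weight enum (see e.g. alexnet.py for a real use):
#   @handle_legacy_interface(weights=("pretrained", SomeModel_Weights.IMAGENET1K_V1))
#   def some_model(*, weights=None, progress=True, **kwargs): ...
#   some_model(pretrained=True)   # warns, then behaves like some_model(weights=SomeModel_Weights.IMAGENET1K_V1)
#   some_model(pretrained=False)  # warns, then behaves like some_model(weights=None)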
def _ovewrite_named_param(kwargs: Dict[str, Any], param: str, new_value: V) -> None:
if param in kwargs:
if kwargs[param] != new_value:
raise ValueError(f"The parameter '{param}' expected value {new_value} but got {kwargs[param]} instead.")
else:
kwargs[param] = new_value
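# Behavior sketch: _ovewrite_named_param(kwargs, "num_classes", 1000) inserts the value when
# "num_classes" is absent, is a no-op when the caller already passed 1000, and raises ValueError
# when the caller passed a conflicting value.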
def _ovewrite_value_param(param: str, actual: Optional[V], expected: V) -> V:
if actual is not None:
if actual != expected:
raise ValueError(f"The parameter '{param}' expected value {expected} but got {actual} instead.")
return expected
class _ModelURLs(dict):
def __getitem__(self, item):
warnings.warn(
"Accessing the model URLs via the internal dictionary of the module is deprecated since 0.13 and may "
"be removed in the future. Please access them via the appropriate Weights Enum instead."
)
return super().__getitem__(item)
```
|
=====================================================================================================================
SOURCE CODE FILE: alexnet.py
LINES: 1
SIZE: 4.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\alexnet.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.alexnet.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
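# Usage sketch:
#   model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)  # downloads and loads pretrained weights
#   model = alexnet(weights=None, num_classes=10)           # randomly initialized 10-class variant
# The legacy call alexnet(pretrained=True) is still accepted via @handle_legacy_interface and emits
# a deprecation warning.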
```
|
======================================================================================================================
SOURCE CODE FILE: convnext.py
LINES: 1
SIZE: 15.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\convnext.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional, Sequence
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from ..ops.misc import Conv2dNormActivation, Permute
from ..ops.stochastic_depth import StochasticDepth
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"ConvNeXt",
"ConvNeXt_Tiny_Weights",
"ConvNeXt_Small_Weights",
"ConvNeXt_Base_Weights",
"ConvNeXt_Large_Weights",
"convnext_tiny",
"convnext_small",
"convnext_base",
"convnext_large",
]
class LayerNorm2d(nn.LayerNorm):
def forward(self, x: Tensor) -> Tensor:
x = x.permute(0, 2, 3, 1)
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.permute(0, 3, 1, 2)
return x
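# Shape sketch: LayerNorm2d(64) applied to an (N, 64, H, W) tensor normalizes each spatial
# position over its 64 channels and returns a tensor of the same (N, 64, H, W) shape.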
class CNBlock(nn.Module):
def __init__(
self,
dim,
layer_scale: float,
stochastic_depth_prob: float,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.block = nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim, bias=True),
Permute([0, 2, 3, 1]),
norm_layer(dim),
nn.Linear(in_features=dim, out_features=4 * dim, bias=True),
nn.GELU(),
nn.Linear(in_features=4 * dim, out_features=dim, bias=True),
Permute([0, 3, 1, 2]),
)
self.layer_scale = nn.Parameter(torch.ones(dim, 1, 1) * layer_scale)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
def forward(self, input: Tensor) -> Tensor:
result = self.layer_scale * self.block(input)
result = self.stochastic_depth(result)
result += input
return result
class CNBlockConfig:
    # Stores information listed in Section 3 of the ConvNeXt paper
def __init__(
self,
input_channels: int,
out_channels: Optional[int],
num_layers: int,
) -> None:
self.input_channels = input_channels
self.out_channels = out_channels
self.num_layers = num_layers
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "input_channels={input_channels}"
s += ", out_channels={out_channels}"
s += ", num_layers={num_layers}"
s += ")"
return s.format(**self.__dict__)
class ConvNeXt(nn.Module):
def __init__(
self,
block_setting: List[CNBlockConfig],
stochastic_depth_prob: float = 0.0,
layer_scale: float = 1e-6,
num_classes: int = 1000,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any,
) -> None:
super().__init__()
_log_api_usage_once(self)
if not block_setting:
raise ValueError("The block_setting should not be empty")
elif not (isinstance(block_setting, Sequence) and all([isinstance(s, CNBlockConfig) for s in block_setting])):
raise TypeError("The block_setting should be List[CNBlockConfig]")
if block is None:
block = CNBlock
if norm_layer is None:
norm_layer = partial(LayerNorm2d, eps=1e-6)
layers: List[nn.Module] = []
# Stem
firstconv_output_channels = block_setting[0].input_channels
layers.append(
Conv2dNormActivation(
3,
firstconv_output_channels,
kernel_size=4,
stride=4,
padding=0,
norm_layer=norm_layer,
activation_layer=None,
bias=True,
)
)
total_stage_blocks = sum(cnf.num_layers for cnf in block_setting)
stage_block_id = 0
for cnf in block_setting:
# Bottlenecks
stage: List[nn.Module] = []
for _ in range(cnf.num_layers):
# adjust stochastic depth probability based on the depth of the stage block
sd_prob = stochastic_depth_prob * stage_block_id / (total_stage_blocks - 1.0)
stage.append(block(cnf.input_channels, layer_scale, sd_prob))
stage_block_id += 1
layers.append(nn.Sequential(*stage))
if cnf.out_channels is not None:
# Downsampling
layers.append(
nn.Sequential(
norm_layer(cnf.input_channels),
nn.Conv2d(cnf.input_channels, cnf.out_channels, kernel_size=2, stride=2),
)
)
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
lastblock = block_setting[-1]
lastconv_output_channels = (
lastblock.out_channels if lastblock.out_channels is not None else lastblock.input_channels
)
self.classifier = nn.Sequential(
norm_layer(lastconv_output_channels), nn.Flatten(1), nn.Linear(lastconv_output_channels, num_classes)
)
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _convnext(
block_setting: List[CNBlockConfig],
stochastic_depth_prob: float,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> ConvNeXt:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = ConvNeXt(block_setting, stochastic_depth_prob=stochastic_depth_prob, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (32, 32),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#convnext",
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
}
class ConvNeXt_Tiny_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/convnext_tiny-983f1562.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=236),
meta={
**_COMMON_META,
"num_params": 28589128,
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.520,
"acc@5": 96.146,
}
},
"_ops": 4.456,
"_file_size": 109.119,
},
)
DEFAULT = IMAGENET1K_V1
class ConvNeXt_Small_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/convnext_small-0c510722.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=230),
meta={
**_COMMON_META,
"num_params": 50223688,
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.616,
"acc@5": 96.650,
}
},
"_ops": 8.684,
"_file_size": 191.703,
},
)
DEFAULT = IMAGENET1K_V1
class ConvNeXt_Base_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/convnext_base-6075fbad.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 88591464,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.062,
"acc@5": 96.870,
}
},
"_ops": 15.355,
"_file_size": 338.064,
},
)
DEFAULT = IMAGENET1K_V1
class ConvNeXt_Large_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/convnext_large-ea097f82.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 197767336,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.414,
"acc@5": 96.976,
}
},
"_ops": 34.361,
"_file_size": 754.537,
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Tiny_Weights.IMAGENET1K_V1))
def convnext_tiny(*, weights: Optional[ConvNeXt_Tiny_Weights] = None, progress: bool = True, **kwargs: Any) -> ConvNeXt:
"""ConvNeXt Tiny model architecture from the
`A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.
Args:
weights (:class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ConvNeXt_Tiny_Weights
:members:
"""
weights = ConvNeXt_Tiny_Weights.verify(weights)
block_setting = [
CNBlockConfig(96, 192, 3),
CNBlockConfig(192, 384, 3),
CNBlockConfig(384, 768, 9),
CNBlockConfig(768, None, 3),
]
stochastic_depth_prob = kwargs.pop("stochastic_depth_prob", 0.1)
return _convnext(block_setting, stochastic_depth_prob, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Small_Weights.IMAGENET1K_V1))
def convnext_small(
*, weights: Optional[ConvNeXt_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> ConvNeXt:
"""ConvNeXt Small model architecture from the
`A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.
Args:
weights (:class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ConvNeXt_Small_Weights
:members:
"""
weights = ConvNeXt_Small_Weights.verify(weights)
block_setting = [
CNBlockConfig(96, 192, 3),
CNBlockConfig(192, 384, 3),
CNBlockConfig(384, 768, 27),
CNBlockConfig(768, None, 3),
]
stochastic_depth_prob = kwargs.pop("stochastic_depth_prob", 0.4)
return _convnext(block_setting, stochastic_depth_prob, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Base_Weights.IMAGENET1K_V1))
def convnext_base(*, weights: Optional[ConvNeXt_Base_Weights] = None, progress: bool = True, **kwargs: Any) -> ConvNeXt:
"""ConvNeXt Base model architecture from the
`A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.
Args:
weights (:class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ConvNeXt_Base_Weights
:members:
"""
weights = ConvNeXt_Base_Weights.verify(weights)
block_setting = [
CNBlockConfig(128, 256, 3),
CNBlockConfig(256, 512, 3),
CNBlockConfig(512, 1024, 27),
CNBlockConfig(1024, None, 3),
]
stochastic_depth_prob = kwargs.pop("stochastic_depth_prob", 0.5)
return _convnext(block_setting, stochastic_depth_prob, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Large_Weights.IMAGENET1K_V1))
def convnext_large(
*, weights: Optional[ConvNeXt_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> ConvNeXt:
"""ConvNeXt Large model architecture from the
`A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.
Args:
weights (:class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ConvNeXt_Large_Weights
:members:
"""
weights = ConvNeXt_Large_Weights.verify(weights)
block_setting = [
CNBlockConfig(192, 384, 3),
CNBlockConfig(384, 768, 3),
CNBlockConfig(768, 1536, 27),
CNBlockConfig(1536, None, 3),
]
stochastic_depth_prob = kwargs.pop("stochastic_depth_prob", 0.5)
return _convnext(block_setting, stochastic_depth_prob, weights, progress, **kwargs)
```
|
======================================================================================================================
SOURCE CODE FILE: densenet.py
LINES: 1
SIZE: 16.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\densenet.py
ENCODING: utf-8
```py
import re
from collections import OrderedDict
from functools import partial
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"DenseNet",
"DenseNet121_Weights",
"DenseNet161_Weights",
"DenseNet169_Weights",
"DenseNet201_Weights",
"densenet121",
"densenet161",
"densenet169",
"densenet201",
]
class _DenseLayer(nn.Module):
def __init__(
self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
) -> None:
super().__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
self.norm2 = nn.BatchNorm2d(bn_size * growth_rate)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bn_function(self, inputs: List[Tensor]) -> Tensor:
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, input: List[Tensor]) -> bool:
for tensor in input:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
def closure(*inputs):
return self.bn_function(inputs)
return cp.checkpoint(closure, *input, use_reentrant=False)
@torch.jit._overload_method # noqa: F811
def forward(self, input: List[Tensor]) -> Tensor: # noqa: F811
pass
@torch.jit._overload_method # noqa: F811
def forward(self, input: Tensor) -> Tensor: # noqa: F811
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, input: Tensor) -> Tensor: # noqa: F811
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers: int,
num_input_features: int,
bn_size: int,
growth_rate: int,
drop_rate: float,
memory_efficient: bool = False,
) -> None:
super().__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module("denselayer%d" % (i + 1), layer)
def forward(self, init_features: Tensor) -> Tensor:
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
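# Channel-count sketch: with num_input_features=64, growth_rate=32 and num_layers=6, each layer
# contributes 32 new channels, so the concatenated output has 64 + 6 * 32 = 256 channels.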
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super().__init__()
self.norm = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottleneck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
"""
def __init__(
self,
growth_rate: int = 32,
block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
num_init_features: int = 64,
bn_size: int = 4,
drop_rate: float = 0,
num_classes: int = 1000,
memory_efficient: bool = False,
) -> None:
super().__init__()
_log_api_usage_once(self)
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor) -> Tensor:
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
def _load_state_dict(model: nn.Module, weights: WeightsEnum, progress: bool) -> None:
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
)
state_dict = weights.get_state_dict(progress=progress, check_hash=True)
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
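# Key-remapping sketch: the regex above turns a legacy checkpoint key such as
#   "features.denseblock1.denselayer1.norm.1.weight"
# into the current naming
#   "features.denseblock1.denselayer1.norm1.weight"
# before the state dict is loaded.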
def _densenet(
growth_rate: int,
block_config: Tuple[int, int, int, int],
num_init_features: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> DenseNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
if weights is not None:
_load_state_dict(model=model, weights=weights, progress=progress)
return model
_COMMON_META = {
"min_size": (29, 29),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/pull/116",
"_docs": """These weights are ported from LuaTorch.""",
}
class DenseNet121_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/densenet121-a639ec97.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 7978856,
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.434,
"acc@5": 91.972,
}
},
"_ops": 2.834,
"_file_size": 30.845,
},
)
DEFAULT = IMAGENET1K_V1
class DenseNet161_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/densenet161-8d451a50.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 28681000,
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.138,
"acc@5": 93.560,
}
},
"_ops": 7.728,
"_file_size": 110.369,
},
)
DEFAULT = IMAGENET1K_V1
class DenseNet169_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/densenet169-b2777c0a.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 14149480,
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.600,
"acc@5": 92.806,
}
},
"_ops": 3.36,
"_file_size": 54.708,
},
)
DEFAULT = IMAGENET1K_V1
class DenseNet201_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/densenet201-c1103571.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 20013928,
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.896,
"acc@5": 93.370,
}
},
"_ops": 4.291,
"_file_size": 77.373,
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet121_Weights.IMAGENET1K_V1))
def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-121 model from
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
Args:
weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.DenseNet121_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.DenseNet121_Weights
:members:
"""
weights = DenseNet121_Weights.verify(weights)
return _densenet(32, (6, 12, 24, 16), 64, weights, progress, **kwargs)
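# Usage sketch: extra keyword arguments are forwarded to the DenseNet constructor, e.g.
#   densenet121(weights=None, memory_efficient=True)  # gradient checkpointing in the dense layers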
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet161_Weights.IMAGENET1K_V1))
def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-161 model from
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
Args:
weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.DenseNet161_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.DenseNet161_Weights
:members:
"""
weights = DenseNet161_Weights.verify(weights)
return _densenet(48, (6, 12, 36, 24), 96, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet169_Weights.IMAGENET1K_V1))
def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-169 model from
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
Args:
weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.DenseNet169_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.DenseNet169_Weights
:members:
"""
weights = DenseNet169_Weights.verify(weights)
return _densenet(32, (6, 12, 32, 32), 64, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet201_Weights.IMAGENET1K_V1))
def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-201 model from
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
Args:
weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.DenseNet201_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.DenseNet201_Weights
:members:
"""
weights = DenseNet201_Weights.verify(weights)
return _densenet(32, (6, 12, 48, 32), 64, weights, progress, **kwargs)
```
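The four builders above differ only in the ``growth_rate``, per-block layer counts, and ``num_init_features`` they forward to ``_densenet`` (e.g. DenseNet-121 uses ``32, (6, 12, 24, 16), 64``). A minimal usage sketch; it assumes the standard ``torchvision`` weights API (``weights.transforms()``), which is not shown in this excerpt:

```py
import torch
from torchvision.models import densenet121, DenseNet121_Weights

# Build DenseNet-121 with the ImageNet weights declared above.
weights = DenseNet121_Weights.IMAGENET1K_V1
model = densenet121(weights=weights).eval()

# The weights entry carries its own preprocessing (ImageClassification, crop_size=224).
preprocess = weights.transforms()
batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)  # dummy image stands in for a real photo

with torch.no_grad():
    logits = model(batch)
print(logits.shape)  # torch.Size([1, 1000]), one logit per ImageNet category
```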
|
================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\__init__.py
ENCODING: utf-8
```py
from .faster_rcnn import *
from .fcos import *
from .keypoint_rcnn import *
from .mask_rcnn import *
from .retinanet import *
from .ssd import *
from .ssdlite import *
```
|
==============================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 22.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\_utils.py
ENCODING: utf-8
```py
import math
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchvision.ops import complete_box_iou_loss, distance_box_iou_loss, FrozenBatchNorm2d, generalized_box_iou_loss
class BalancedPositiveNegativeSampler:
"""
This class samples batches, ensuring that they contain a fixed proportion of positives
"""
def __init__(self, batch_size_per_image: int, positive_fraction: float) -> None:
"""
Args:
batch_size_per_image (int): number of elements to be selected per image
positive_fraction (float): percentage of positive elements per batch
"""
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
def __call__(self, matched_idxs: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
"""
Args:
matched_idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
positives.
Returns:
pos_idx (list[tensor])
neg_idx (list[tensor])
Returns two lists of binary masks for each image.
The first list contains the positive elements that were selected,
        and the second list the negative examples.
"""
pos_idx = []
neg_idx = []
for matched_idxs_per_image in matched_idxs:
positive = torch.where(matched_idxs_per_image >= 1)[0]
negative = torch.where(matched_idxs_per_image == 0)[0]
num_pos = int(self.batch_size_per_image * self.positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = self.batch_size_per_image - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx_per_image = positive[perm1]
neg_idx_per_image = negative[perm2]
# create binary mask from indices
pos_idx_per_image_mask = torch.zeros_like(matched_idxs_per_image, dtype=torch.uint8)
neg_idx_per_image_mask = torch.zeros_like(matched_idxs_per_image, dtype=torch.uint8)
pos_idx_per_image_mask[pos_idx_per_image] = 1
neg_idx_per_image_mask[neg_idx_per_image] = 1
pos_idx.append(pos_idx_per_image_mask)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
@torch.jit._script_if_tracing
def encode_boxes(reference_boxes: Tensor, proposals: Tensor, weights: Tensor) -> Tensor:
"""
Encode a set of proposals with respect to some
reference boxes
Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
weights (Tensor[4]): the weights for ``(x, y, w, h)``
"""
# perform some unpacking to make it JIT-fusion friendly
wx = weights[0]
wy = weights[1]
ww = weights[2]
wh = weights[3]
proposals_x1 = proposals[:, 0].unsqueeze(1)
proposals_y1 = proposals[:, 1].unsqueeze(1)
proposals_x2 = proposals[:, 2].unsqueeze(1)
proposals_y2 = proposals[:, 3].unsqueeze(1)
reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1)
reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1)
reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1)
reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1)
# implementation starts here
ex_widths = proposals_x2 - proposals_x1
ex_heights = proposals_y2 - proposals_y1
ex_ctr_x = proposals_x1 + 0.5 * ex_widths
ex_ctr_y = proposals_y1 + 0.5 * ex_heights
gt_widths = reference_boxes_x2 - reference_boxes_x1
gt_heights = reference_boxes_y2 - reference_boxes_y1
gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths
gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
class BoxCoder:
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(
self, weights: Tuple[float, float, float, float], bbox_xform_clip: float = math.log(1000.0 / 16)
) -> None:
"""
Args:
weights (4-element tuple)
bbox_xform_clip (float)
"""
self.weights = weights
self.bbox_xform_clip = bbox_xform_clip
def encode(self, reference_boxes: List[Tensor], proposals: List[Tensor]) -> List[Tensor]:
boxes_per_image = [len(b) for b in reference_boxes]
reference_boxes = torch.cat(reference_boxes, dim=0)
proposals = torch.cat(proposals, dim=0)
targets = self.encode_single(reference_boxes, proposals)
return targets.split(boxes_per_image, 0)
def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor:
"""
Encode a set of proposals with respect to some
reference boxes
Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
dtype = reference_boxes.dtype
device = reference_boxes.device
weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
targets = encode_boxes(reference_boxes, proposals, weights)
return targets
def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
torch._assert(
isinstance(boxes, (list, tuple)),
"This function expects boxes of type list or tuple.",
)
torch._assert(
isinstance(rel_codes, torch.Tensor),
"This function expects rel_codes of type torch.Tensor.",
)
boxes_per_image = [b.size(0) for b in boxes]
concat_boxes = torch.cat(boxes, dim=0)
box_sum = 0
for val in boxes_per_image:
box_sum += val
if box_sum > 0:
rel_codes = rel_codes.reshape(box_sum, -1)
pred_boxes = self.decode_single(rel_codes, concat_boxes)
if box_sum > 0:
pred_boxes = pred_boxes.reshape(box_sum, -1, 4)
return pred_boxes
def decode_single(self, rel_codes: Tensor, boxes: Tensor) -> Tensor:
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Args:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
boxes = boxes.to(rel_codes.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = rel_codes[:, 0::4] / wx
dy = rel_codes[:, 1::4] / wy
dw = rel_codes[:, 2::4] / ww
dh = rel_codes[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.bbox_xform_clip)
dh = torch.clamp(dh, max=self.bbox_xform_clip)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
# Distance from center to box's corner.
c_to_c_h = torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h
c_to_c_w = torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w
pred_boxes1 = pred_ctr_x - c_to_c_w
pred_boxes2 = pred_ctr_y - c_to_c_h
pred_boxes3 = pred_ctr_x + c_to_c_w
pred_boxes4 = pred_ctr_y + c_to_c_h
pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1)
return pred_boxes
class BoxLinearCoder:
"""
The linear box-to-box transform defined in FCOS. The transformation is parameterized
by the distance from the center of (square) src box to 4 edges of the target box.
"""
def __init__(self, normalize_by_size: bool = True) -> None:
"""
Args:
normalize_by_size (bool): normalize deltas by the size of src (anchor) boxes.
"""
self.normalize_by_size = normalize_by_size
def encode(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor:
"""
Encode a set of proposals with respect to some reference boxes
Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
Returns:
Tensor: the encoded relative box offsets that can be used to
decode the boxes.
"""
# get the center of reference_boxes
reference_boxes_ctr_x = 0.5 * (reference_boxes[..., 0] + reference_boxes[..., 2])
reference_boxes_ctr_y = 0.5 * (reference_boxes[..., 1] + reference_boxes[..., 3])
# get box regression transformation deltas
target_l = reference_boxes_ctr_x - proposals[..., 0]
target_t = reference_boxes_ctr_y - proposals[..., 1]
target_r = proposals[..., 2] - reference_boxes_ctr_x
target_b = proposals[..., 3] - reference_boxes_ctr_y
targets = torch.stack((target_l, target_t, target_r, target_b), dim=-1)
if self.normalize_by_size:
reference_boxes_w = reference_boxes[..., 2] - reference_boxes[..., 0]
reference_boxes_h = reference_boxes[..., 3] - reference_boxes[..., 1]
reference_boxes_size = torch.stack(
(reference_boxes_w, reference_boxes_h, reference_boxes_w, reference_boxes_h), dim=-1
)
targets = targets / reference_boxes_size
return targets
def decode(self, rel_codes: Tensor, boxes: Tensor) -> Tensor:
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Args:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
Returns:
Tensor: the predicted boxes with the encoded relative box offsets.
.. note::
This method assumes that ``rel_codes`` and ``boxes`` have same size for 0th dimension. i.e. ``len(rel_codes) == len(boxes)``.
"""
boxes = boxes.to(dtype=rel_codes.dtype)
ctr_x = 0.5 * (boxes[..., 0] + boxes[..., 2])
ctr_y = 0.5 * (boxes[..., 1] + boxes[..., 3])
if self.normalize_by_size:
boxes_w = boxes[..., 2] - boxes[..., 0]
boxes_h = boxes[..., 3] - boxes[..., 1]
list_box_size = torch.stack((boxes_w, boxes_h, boxes_w, boxes_h), dim=-1)
rel_codes = rel_codes * list_box_size
pred_boxes1 = ctr_x - rel_codes[..., 0]
pred_boxes2 = ctr_y - rel_codes[..., 1]
pred_boxes3 = ctr_x + rel_codes[..., 2]
pred_boxes4 = ctr_y + rel_codes[..., 3]
pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=-1)
return pred_boxes
class Matcher:
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
Matching is based on the MxN match_quality_matrix, that characterizes how well
each (ground-truth, predicted)-pair match. For example, if the elements are
boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
__annotations__ = {
"BELOW_LOW_THRESHOLD": int,
"BETWEEN_THRESHOLDS": int,
}
def __init__(self, high_threshold: float, low_threshold: float, allow_low_quality_matches: bool = False) -> None:
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
self.BELOW_LOW_THRESHOLD = -1
self.BETWEEN_THRESHOLDS = -2
torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold")
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix: Tensor) -> Tensor:
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
[0, M - 1] or a negative value indicating that prediction i could not
be matched.
"""
if match_quality_matrix.numel() == 0:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
raise ValueError("No ground-truth boxes available for one of the images during training")
else:
raise ValueError("No proposal boxes available for one of the images during training")
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
else:
all_matches = None # type: ignore[assignment]
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (matched_vals < self.high_threshold)
matches[below_low_threshold] = self.BELOW_LOW_THRESHOLD
matches[between_thresholds] = self.BETWEEN_THRESHOLDS
if self.allow_low_quality_matches:
if all_matches is None:
torch._assert(False, "all_matches should not be None")
else:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches: Tensor, all_matches: Tensor, match_quality_matrix: Tensor) -> None:
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has the highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find the highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.where(match_quality_matrix == highest_quality_foreach_gt[:, None])
# Example gt_pred_pairs_of_highest_quality:
# (tensor([0, 1, 1, 2, 2, 3, 3, 4, 5, 5]),
# tensor([39796, 32055, 32070, 39190, 40255, 40390, 41455, 45470, 45325, 46390]))
# Each element in the first tensor is a gt index, and each element in second tensor is a prediction index
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
class SSDMatcher(Matcher):
def __init__(self, threshold: float) -> None:
super().__init__(threshold, threshold, allow_low_quality_matches=False)
def __call__(self, match_quality_matrix: Tensor) -> Tensor:
matches = super().__call__(match_quality_matrix)
# For each gt, find the prediction with which it has the highest quality
_, highest_quality_pred_foreach_gt = match_quality_matrix.max(dim=1)
matches[highest_quality_pred_foreach_gt] = torch.arange(
highest_quality_pred_foreach_gt.size(0), dtype=torch.int64, device=highest_quality_pred_foreach_gt.device
)
return matches
def overwrite_eps(model: nn.Module, eps: float) -> None:
"""
This method overwrites the default eps values of all the
FrozenBatchNorm2d layers of the model with the provided value.
This is necessary to address the BC-breaking change introduced
by the bug-fix at pytorch/vision#2933. The overwrite is applied
only when the pretrained weights are loaded to maintain compatibility
with previous versions.
Args:
model (nn.Module): The model on which we perform the overwrite.
eps (float): The new value of eps.
"""
for module in model.modules():
if isinstance(module, FrozenBatchNorm2d):
module.eps = eps
def retrieve_out_channels(model: nn.Module, size: Tuple[int, int]) -> List[int]:
"""
This method retrieves the number of output channels of a specific model.
Args:
model (nn.Module): The model for which we estimate the out_channels.
It should return a single Tensor or an OrderedDict[Tensor].
size (Tuple[int, int]): The size (wxh) of the input.
Returns:
out_channels (List[int]): A list of the output channels of the model.
"""
in_training = model.training
model.eval()
with torch.no_grad():
# Use dummy data to retrieve the feature map sizes to avoid hard-coding their values
device = next(model.parameters()).device
tmp_img = torch.zeros((1, 3, size[1], size[0]), device=device)
features = model(tmp_img)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
out_channels = [x.size(1) for x in features.values()]
if in_training:
model.train()
return out_channels
@torch.jit.unused
def _fake_cast_onnx(v: Tensor) -> int:
return v # type: ignore[return-value]
def _topk_min(input: Tensor, orig_kval: int, axis: int) -> int:
"""
ONNX spec requires the k-value to be less than or equal to the number of inputs along
provided dim. Certain models use the number of elements along a particular axis instead of K
if K exceeds the number of elements along that axis. Previously, python's min() function was
used to determine whether to use the provided k-value or the specified dim axis value.
However, in cases where the model is being exported in tracing mode, python min() is
    static, causing the model to be traced incorrectly and eventually fail at the topk node.
In order to avoid this situation, in tracing mode, torch.min() is used instead.
Args:
input (Tensor): The original input tensor.
orig_kval (int): The provided k-value.
axis(int): Axis along which we retrieve the input size.
Returns:
min_kval (int): Appropriately selected k-value.
"""
if not torch.jit.is_tracing():
return min(orig_kval, input.size(axis))
axis_dim_val = torch._shape_as_tensor(input)[axis].unsqueeze(0)
min_kval = torch.min(torch.cat((torch.tensor([orig_kval], dtype=axis_dim_val.dtype), axis_dim_val), 0))
return _fake_cast_onnx(min_kval)
def _box_loss(
type: str,
box_coder: BoxCoder,
anchors_per_image: Tensor,
matched_gt_boxes_per_image: Tensor,
bbox_regression_per_image: Tensor,
cnf: Optional[Dict[str, float]] = None,
) -> Tensor:
torch._assert(type in ["l1", "smooth_l1", "ciou", "diou", "giou"], f"Unsupported loss: {type}")
if type == "l1":
target_regression = box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image)
return F.l1_loss(bbox_regression_per_image, target_regression, reduction="sum")
elif type == "smooth_l1":
target_regression = box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image)
beta = cnf["beta"] if cnf is not None and "beta" in cnf else 1.0
return F.smooth_l1_loss(bbox_regression_per_image, target_regression, reduction="sum", beta=beta)
else:
bbox_per_image = box_coder.decode_single(bbox_regression_per_image, anchors_per_image)
eps = cnf["eps"] if cnf is not None and "eps" in cnf else 1e-7
if type == "ciou":
return complete_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps)
if type == "diou":
return distance_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps)
# otherwise giou
return generalized_box_iou_loss(bbox_per_image, matched_gt_boxes_per_image, reduction="sum", eps=eps)
```
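The coders and matchers above are self-contained and can be exercised outside a full detection model. A small sketch using only classes from this (private) module plus ``torchvision.ops.box_iou``; the concrete boxes are illustrative:

```py
import torch
from torchvision.models.detection._utils import BoxCoder, Matcher
from torchvision.ops import box_iou

# BoxCoder round-trip: decoding the encoded deltas against the same proposals recovers the boxes.
coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
proposals = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 25.0, 15.0]])
gt_boxes = torch.tensor([[1.0, 1.0, 11.0, 9.0], [6.0, 4.0, 24.0, 16.0]])
deltas = coder.encode_single(gt_boxes, proposals)
decoded = coder.decode(deltas, [proposals])                      # shape (2, 1, 4)
print(torch.allclose(decoded.squeeze(1), gt_boxes, atol=1e-4))   # True

# Matcher: rows of the quality matrix are ground-truth boxes, columns are predictions.
quality = box_iou(gt_boxes, proposals)                           # M x N IoU matrix
matcher = Matcher(high_threshold=0.5, low_threshold=0.3, allow_low_quality_matches=True)
matches = matcher(quality)                                       # gt index per prediction, or -1 / -2
print(matches)                                                   # tensor([0, 1]) for these boxes
```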
|
====================================================================================================================================
SOURCE CODE FILE: anchor_utils.py
LINES: 1
SIZE: 11.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\anchor_utils.py
ENCODING: utf-8
```py
import math
from typing import List, Optional
import torch
from torch import nn, Tensor
from .image_list import ImageList
class AnchorGenerator(nn.Module):
"""
Module that generates anchors for a set of feature maps and
image sizes.
    The module supports computing anchors at multiple sizes and aspect ratios
per feature map. This module assumes aspect ratio = height / width for
each anchor.
sizes and aspect_ratios should have the same number of elements, and it should
correspond to the number of feature maps.
sizes[i] and aspect_ratios[i] can have an arbitrary number of elements,
and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
per spatial location for feature map i.
Args:
sizes (Tuple[Tuple[int]]):
aspect_ratios (Tuple[Tuple[float]]):
"""
__annotations__ = {
"cell_anchors": List[torch.Tensor],
}
def __init__(
self,
sizes=((128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),),
):
super().__init__()
if not isinstance(sizes[0], (list, tuple)):
# TODO change this
sizes = tuple((s,) for s in sizes)
if not isinstance(aspect_ratios[0], (list, tuple)):
aspect_ratios = (aspect_ratios,) * len(sizes)
self.sizes = sizes
self.aspect_ratios = aspect_ratios
self.cell_anchors = [
self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(sizes, aspect_ratios)
]
# TODO: https://github.com/pytorch/pytorch/issues/26792
# For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values.
# (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios)
# This method assumes aspect ratio = height / width for an anchor.
def generate_anchors(
self,
scales: List[int],
aspect_ratios: List[float],
dtype: torch.dtype = torch.float32,
device: torch.device = torch.device("cpu"),
) -> Tensor:
scales = torch.as_tensor(scales, dtype=dtype, device=device)
aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
h_ratios = torch.sqrt(aspect_ratios)
w_ratios = 1 / h_ratios
ws = (w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h_ratios[:, None] * scales[None, :]).view(-1)
base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
return base_anchors.round()
def set_cell_anchors(self, dtype: torch.dtype, device: torch.device):
self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors]
def num_anchors_per_location(self) -> List[int]:
return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]
# For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2),
# output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a.
def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
anchors = []
cell_anchors = self.cell_anchors
torch._assert(cell_anchors is not None, "cell_anchors should not be None")
torch._assert(
len(grid_sizes) == len(strides) == len(cell_anchors),
"Anchors should be Tuple[Tuple[int]] because each feature "
"map could potentially have different sizes and aspect ratios. "
"There needs to be a match between the number of "
"feature maps passed and the number of sizes / aspect ratios specified.",
)
for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
grid_height, grid_width = size
stride_height, stride_width = stride
device = base_anchors.device
# For output anchor, compute [x_center, y_center, x_center, y_center]
shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width
shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
# For every (base anchor, output anchor) pair,
# offset each zero-centered base anchor by the center of the output anchor.
anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
return anchors
def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
image_size = image_list.tensors.shape[-2:]
dtype, device = feature_maps[0].dtype, feature_maps[0].device
strides = [
[
torch.empty((), dtype=torch.int64, device=device).fill_(image_size[0] // g[0]),
torch.empty((), dtype=torch.int64, device=device).fill_(image_size[1] // g[1]),
]
for g in grid_sizes
]
self.set_cell_anchors(dtype, device)
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
anchors: List[List[torch.Tensor]] = []
for _ in range(len(image_list.image_sizes)):
anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps]
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]
return anchors
class DefaultBoxGenerator(nn.Module):
"""
This module generates the default boxes of SSD for a set of feature maps and image sizes.
Args:
aspect_ratios (List[List[int]]): A list with all the aspect ratios used in each feature map.
min_ratio (float): The minimum scale :math:`\text{s}_{\text{min}}` of the default boxes used in the estimation
of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
max_ratio (float): The maximum scale :math:`\text{s}_{\text{max}}` of the default boxes used in the estimation
of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
        scales (List[float], optional): The scales of the default boxes. If not provided it will be estimated using
            the ``min_ratio`` and ``max_ratio`` parameters.
        steps (List[int], optional): It's a hyper-parameter that affects the tiling of default boxes. If not provided
it will be estimated from the data.
clip (bool): Whether the standardized values of default boxes should be clipped between 0 and 1. The clipping
is applied while the boxes are encoded in format ``(cx, cy, w, h)``.
"""
def __init__(
self,
aspect_ratios: List[List[int]],
min_ratio: float = 0.15,
max_ratio: float = 0.9,
scales: Optional[List[float]] = None,
steps: Optional[List[int]] = None,
clip: bool = True,
):
super().__init__()
if steps is not None and len(aspect_ratios) != len(steps):
raise ValueError("aspect_ratios and steps should have the same length")
self.aspect_ratios = aspect_ratios
self.steps = steps
self.clip = clip
num_outputs = len(aspect_ratios)
# Estimation of default boxes scales
if scales is None:
if num_outputs > 1:
range_ratio = max_ratio - min_ratio
self.scales = [min_ratio + range_ratio * k / (num_outputs - 1.0) for k in range(num_outputs)]
self.scales.append(1.0)
else:
self.scales = [min_ratio, max_ratio]
else:
self.scales = scales
self._wh_pairs = self._generate_wh_pairs(num_outputs)
def _generate_wh_pairs(
self, num_outputs: int, dtype: torch.dtype = torch.float32, device: torch.device = torch.device("cpu")
) -> List[Tensor]:
_wh_pairs: List[Tensor] = []
for k in range(num_outputs):
# Adding the 2 default width-height pairs for aspect ratio 1 and scale s'k
s_k = self.scales[k]
s_prime_k = math.sqrt(self.scales[k] * self.scales[k + 1])
wh_pairs = [[s_k, s_k], [s_prime_k, s_prime_k]]
# Adding 2 pairs for each aspect ratio of the feature map k
for ar in self.aspect_ratios[k]:
sq_ar = math.sqrt(ar)
w = self.scales[k] * sq_ar
h = self.scales[k] / sq_ar
wh_pairs.extend([[w, h], [h, w]])
_wh_pairs.append(torch.as_tensor(wh_pairs, dtype=dtype, device=device))
return _wh_pairs
def num_anchors_per_location(self) -> List[int]:
        # Estimate num of anchors based on aspect ratios: 2 default boxes + 2 * ratios of feature map.
return [2 + 2 * len(r) for r in self.aspect_ratios]
# Default Boxes calculation based on page 6 of SSD paper
def _grid_default_boxes(
self, grid_sizes: List[List[int]], image_size: List[int], dtype: torch.dtype = torch.float32
) -> Tensor:
default_boxes = []
for k, f_k in enumerate(grid_sizes):
# Now add the default boxes for each width-height pair
if self.steps is not None:
x_f_k = image_size[1] / self.steps[k]
y_f_k = image_size[0] / self.steps[k]
else:
y_f_k, x_f_k = f_k
shifts_x = ((torch.arange(0, f_k[1]) + 0.5) / x_f_k).to(dtype=dtype)
shifts_y = ((torch.arange(0, f_k[0]) + 0.5) / y_f_k).to(dtype=dtype)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y) * len(self._wh_pairs[k]), dim=-1).reshape(-1, 2)
# Clipping the default boxes while the boxes are encoded in format (cx, cy, w, h)
_wh_pair = self._wh_pairs[k].clamp(min=0, max=1) if self.clip else self._wh_pairs[k]
wh_pairs = _wh_pair.repeat((f_k[0] * f_k[1]), 1)
default_box = torch.cat((shifts, wh_pairs), dim=1)
default_boxes.append(default_box)
return torch.cat(default_boxes, dim=0)
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"aspect_ratios={self.aspect_ratios}"
f", clip={self.clip}"
f", scales={self.scales}"
f", steps={self.steps}"
")"
)
return s
def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
image_size = image_list.tensors.shape[-2:]
dtype, device = feature_maps[0].dtype, feature_maps[0].device
default_boxes = self._grid_default_boxes(grid_sizes, image_size, dtype=dtype)
default_boxes = default_boxes.to(device)
dboxes = []
x_y_size = torch.tensor([image_size[1], image_size[0]], device=default_boxes.device)
for _ in image_list.image_sizes:
dboxes_in_image = default_boxes
dboxes_in_image = torch.cat(
[
(dboxes_in_image[:, :2] - 0.5 * dboxes_in_image[:, 2:]) * x_y_size,
(dboxes_in_image[:, :2] + 0.5 * dboxes_in_image[:, 2:]) * x_y_size,
],
-1,
)
dboxes.append(dboxes_in_image)
return dboxes
```
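A quick way to see the anchor layout is to drive the generators directly with a dummy ``ImageList`` (imported at the top of this file) and random feature maps; the counts in the comments follow from the formulas in the docstrings above:

```py
import torch
from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
from torchvision.models.detection.image_list import ImageList

# Two 224x224 images and one 7x7 feature map (stride 32).
images = torch.rand(2, 3, 224, 224)
image_list = ImageList(images, [(224, 224), (224, 224)])
feature_maps = [torch.rand(2, 256, 7, 7)]

# 3 sizes x 3 aspect ratios = 9 anchors per spatial location.
anchor_gen = AnchorGenerator(sizes=((32, 64, 128),), aspect_ratios=((0.5, 1.0, 2.0),))
anchors = anchor_gen(image_list, feature_maps)
print(anchor_gen.num_anchors_per_location())  # [9]
print([a.shape for a in anchors])             # 7 * 7 * 9 = 441 anchors per image, each tensor (441, 4)

# SSD-style default boxes: 2 + 2 * len(aspect_ratios[k]) boxes per location of feature map k.
dbox_gen = DefaultBoxGenerator(aspect_ratios=[[2], [2, 3]])
dboxes = dbox_gen(image_list, [torch.rand(2, 256, 7, 7), torch.rand(2, 256, 4, 4)])
print([d.shape for d in dboxes])              # 7*7*4 + 4*4*6 = 292 boxes per image, each tensor (292, 4)
```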
|
======================================================================================================================================
SOURCE CODE FILE: backbone_utils.py
LINES: 1
SIZE: 10.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\backbone_utils.py
ENCODING: utf-8
```py
import warnings
from typing import Callable, Dict, List, Optional, Union
from torch import nn, Tensor
from torchvision.ops import misc as misc_nn_ops
from torchvision.ops.feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool
from .. import mobilenet, resnet
from .._api import _get_enum_from_fn, WeightsEnum
from .._utils import handle_legacy_interface, IntermediateLayerGetter
class BackboneWithFPN(nn.Module):
"""
Adds a FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Args:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(
self,
backbone: nn.Module,
return_layers: Dict[str, str],
in_channels_list: List[int],
out_channels: int,
extra_blocks: Optional[ExtraFPNBlock] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=extra_blocks,
norm_layer=norm_layer,
)
self.out_channels = out_channels
def forward(self, x: Tensor) -> Dict[str, Tensor]:
x = self.body(x)
x = self.fpn(x)
return x
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: _get_enum_from_fn(resnet.__dict__[kwargs["backbone_name"]])["IMAGENET1K_V1"],
),
)
def resnet_fpn_backbone(
*,
backbone_name: str,
weights: Optional[WeightsEnum],
norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d,
trainable_layers: int = 3,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
"""
Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
Examples::
>>> import torch
>>> from torchvision.models import ResNet50_Weights
>>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
>>> backbone = resnet_fpn_backbone(backbone_name='resnet50', weights=ResNet50_Weights.DEFAULT, trainable_layers=3)
>>> # get some dummy image
>>> x = torch.rand(1,3,64,64)
>>> # compute the output
>>> output = backbone(x)
>>> print([(k, v.shape) for k, v in output.items()])
>>> # returns
>>> [('0', torch.Size([1, 256, 16, 16])),
>>> ('1', torch.Size([1, 256, 8, 8])),
>>> ('2', torch.Size([1, 256, 4, 4])),
>>> ('3', torch.Size([1, 256, 2, 2])),
>>> ('pool', torch.Size([1, 256, 1, 1]))]
Args:
backbone_name (string): resnet architecture. Possible values are 'resnet18', 'resnet34', 'resnet50',
'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
weights (WeightsEnum, optional): The pretrained weights for the model
norm_layer (callable): it is recommended to use the default value. For details visit:
(https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
trainable_layers (int): number of trainable (not frozen) layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
returned_layers (list of int): The layers of the network to return. Each entry must be in ``[1, 4]``.
By default, all layers are returned.
extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
be performed. It is expected to take the fpn features, the original
features and the names of the original features as input, and returns
a new list of feature maps and their corresponding names. By
default, a ``LastLevelMaxPool`` is used.
"""
backbone = resnet.__dict__[backbone_name](weights=weights, norm_layer=norm_layer)
return _resnet_fpn_extractor(backbone, trainable_layers, returned_layers, extra_blocks)
def _resnet_fpn_extractor(
backbone: resnet.ResNet,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> BackboneWithFPN:
# select layers that won't be frozen
if trainable_layers < 0 or trainable_layers > 5:
raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}")
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
if trainable_layers == 5:
layers_to_train.append("bn1")
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [1, 2, 3, 4]
if min(returned_layers) <= 0 or max(returned_layers) >= 5:
raise ValueError(f"Each returned layer should be in the range [1,4]. Got {returned_layers}")
return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
out_channels = 256
return BackboneWithFPN(
backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, norm_layer=norm_layer
)
def _validate_trainable_layers(
is_trained: bool,
trainable_backbone_layers: Optional[int],
max_value: int,
default_value: int,
) -> int:
# don't freeze any layers if pretrained model or backbone is not used
if not is_trained:
if trainable_backbone_layers is not None:
warnings.warn(
"Changing trainable_backbone_layers has no effect if "
"neither pretrained nor pretrained_backbone have been set to True, "
f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable"
)
trainable_backbone_layers = max_value
# by default freeze first blocks
if trainable_backbone_layers is None:
trainable_backbone_layers = default_value
if trainable_backbone_layers < 0 or trainable_backbone_layers > max_value:
raise ValueError(
f"Trainable backbone layers should be in the range [0,{max_value}], got {trainable_backbone_layers} "
)
return trainable_backbone_layers
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: _get_enum_from_fn(mobilenet.__dict__[kwargs["backbone_name"]])["IMAGENET1K_V1"],
),
)
def mobilenet_backbone(
*,
backbone_name: str,
weights: Optional[WeightsEnum],
fpn: bool,
norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d,
trainable_layers: int = 2,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> nn.Module:
backbone = mobilenet.__dict__[backbone_name](weights=weights, norm_layer=norm_layer)
return _mobilenet_extractor(backbone, fpn, trainable_layers, returned_layers, extra_blocks)
def _mobilenet_extractor(
backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
fpn: bool,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> nn.Module:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
num_stages = len(stage_indices)
# find the index of the layer from which we won't freeze
if trainable_layers < 0 or trainable_layers > num_stages:
raise ValueError(f"Trainable layers should be in the range [0,{num_stages}], got {trainable_layers} ")
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
out_channels = 256
if fpn:
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [num_stages - 2, num_stages - 1]
if min(returned_layers) < 0 or max(returned_layers) >= num_stages:
raise ValueError(f"Each returned layer should be in the range [0,{num_stages - 1}], got {returned_layers} ")
return_layers = {f"{stage_indices[k]}": str(v) for v, k in enumerate(returned_layers)}
in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
return BackboneWithFPN(
backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, norm_layer=norm_layer
)
else:
m = nn.Sequential(
backbone,
# depthwise linear combination of channels to reduce their size
nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
)
m.out_channels = out_channels # type: ignore[assignment]
return m
```
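``resnet_fpn_backbone`` documents its own usage above; ``mobilenet_backbone`` does not, so here is a comparable sketch. The weight enum ``MobileNet_V3_Large_Weights`` is assumed from ``torchvision.models``; with ``fpn=True`` the result is a ``BackboneWithFPN`` that returns an ordered dict of feature maps:

```py
import torch
from torchvision.models import MobileNet_V3_Large_Weights
from torchvision.models.detection.backbone_utils import mobilenet_backbone

backbone = mobilenet_backbone(
    backbone_name="mobilenet_v3_large",
    weights=MobileNet_V3_Large_Weights.IMAGENET1K_V1,
    fpn=True,
    trainable_layers=2,
)
out = backbone(torch.rand(1, 3, 320, 320))
print(backbone.out_channels)                             # 256, as hard-coded in _mobilenet_extractor
print({name: feat.shape for name, feat in out.items()})  # e.g. keys '0', '1' and 'pool'
```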
|
===================================================================================================================================
SOURCE CODE FILE: faster_rcnn.py
LINES: 1
SIZE: 36.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\faster_rcnn.py
ENCODING: utf-8
```py
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ...ops import misc as misc_nn_ops
from ...transforms._presets import ObjectDetection
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights
from ..resnet import resnet50, ResNet50_Weights
from ._utils import overwrite_eps
from .anchor_utils import AnchorGenerator
from .backbone_utils import _mobilenet_extractor, _resnet_fpn_extractor, _validate_trainable_layers
from .generalized_rcnn import GeneralizedRCNN
from .roi_heads import RoIHeads
from .rpn import RegionProposalNetwork, RPNHead
from .transform import GeneralizedRCNNTransform
__all__ = [
"FasterRCNN",
"FasterRCNN_ResNet50_FPN_Weights",
"FasterRCNN_ResNet50_FPN_V2_Weights",
"FasterRCNN_MobileNet_V3_Large_FPN_Weights",
"FasterRCNN_MobileNet_V3_Large_320_FPN_Weights",
"fasterrcnn_resnet50_fpn",
"fasterrcnn_resnet50_fpn_v2",
"fasterrcnn_mobilenet_v3_large_fpn",
"fasterrcnn_mobilenet_v3_large_320_fpn",
]
def _default_anchorgen():
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
return AnchorGenerator(anchor_sizes, aspect_ratios)
class FasterRCNN(GeneralizedRCNN):
"""
Implements Faster R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
    During training, the model expects both the input tensors and targets (list of dictionaries),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): Images are rescaled before feeding them to the backbone:
we attempt to preserve the aspect ratio and scale the shorter edge
to ``min_size``. If the resulting longer edge exceeds ``max_size``,
then downscale so that the longer edge does not exceed ``max_size``.
            This may result in the shorter edge being lower than ``min_size``.
max_size (int): See ``min_size``.
image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
rpn_score_thresh (float): only return proposals with an objectness score greater than rpn_score_thresh
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import FasterRCNN
>>> from torchvision.models.detection.rpn import AnchorGenerator
>>> # load a pre-trained model for classification and return
>>> # only the features
        >>> from torchvision.models import MobileNet_V2_Weights
        >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
>>> # FasterRCNN needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280,
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
        >>> # let's define which feature maps we will use to perform
        >>> # the region of interest cropping, as well as the size of
        >>> # the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> # put the pieces together inside a FasterRCNN model
>>> model = FasterRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
**kwargs,
):
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
if not isinstance(rpn_anchor_generator, (AnchorGenerator, type(None))):
raise TypeError(
f"rpn_anchor_generator should be of type AnchorGenerator or None instead of {type(rpn_anchor_generator)}"
)
if not isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None))):
raise TypeError(
f"box_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(box_roi_pool)}"
)
if num_classes is not None:
if box_predictor is not None:
raise ValueError("num_classes should be None when box_predictor is specified")
else:
if box_predictor is None:
raise ValueError("num_classes should not be None when box_predictor is not specified")
out_channels = backbone.out_channels
if rpn_anchor_generator is None:
rpn_anchor_generator = _default_anchorgen()
if rpn_head is None:
rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
rpn = RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
if box_head is None:
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(out_channels * resolution**2, representation_size)
if box_predictor is None:
representation_size = 1024
box_predictor = FastRCNNPredictor(representation_size, num_classes)
roi_heads = RoIHeads(
# Box
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std, **kwargs)
super().__init__(backbone, rpn, roi_heads, transform)
class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
Args:
in_channels (int): number of input channels
representation_size (int): size of the intermediate representation
"""
def __init__(self, in_channels, representation_size):
super().__init__()
self.fc6 = nn.Linear(in_channels, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
class FastRCNNConvFCHead(nn.Sequential):
def __init__(
self,
input_size: Tuple[int, int, int],
conv_layers: List[int],
fc_layers: List[int],
norm_layer: Optional[Callable[..., nn.Module]] = None,
):
"""
Args:
input_size (Tuple[int, int, int]): the input size in CHW format.
conv_layers (list): feature dimensions of each Convolution layer
fc_layers (list): feature dimensions of each FCN layer
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
"""
in_channels, in_height, in_width = input_size
blocks = []
previous_channels = in_channels
for current_channels in conv_layers:
blocks.append(misc_nn_ops.Conv2dNormActivation(previous_channels, current_channels, norm_layer=norm_layer))
previous_channels = current_channels
blocks.append(nn.Flatten())
previous_channels = previous_channels * in_height * in_width
for current_channels in fc_layers:
blocks.append(nn.Linear(previous_channels, current_channels))
blocks.append(nn.ReLU(inplace=True))
previous_channels = current_channels
super().__init__(*blocks)
for layer in self.modules():
if isinstance(layer, nn.Conv2d):
nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu")
if layer.bias is not None:
nn.init.zeros_(layer.bias)
class FastRCNNPredictor(nn.Module):
"""
Standard classification + bounding box regression layers
for Fast R-CNN.
Args:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes):
super().__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
def forward(self, x):
if x.dim() == 4:
torch._assert(
list(x.shape[2:]) == [1, 1],
f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}",
)
x = x.flatten(start_dim=1)
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
_COMMON_META = {
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
}
class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 41755286,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-resnet-50-fpn",
"_metrics": {
"COCO-val2017": {
"box_map": 37.0,
}
},
"_ops": 134.38,
"_file_size": 159.743,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_v2_coco-dd69338a.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 43712278,
"recipe": "https://github.com/pytorch/vision/pull/5763",
"_metrics": {
"COCO-val2017": {
"box_map": 46.7,
}
},
"_ops": 280.371,
"_file_size": 167.104,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
DEFAULT = COCO_V1
class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 19386354,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-fpn",
"_metrics": {
"COCO-val2017": {
"box_map": 32.8,
}
},
"_ops": 4.494,
"_file_size": 74.239,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 19386354,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-320-fpn",
"_metrics": {
"COCO-val2017": {
"box_map": 22.8,
}
},
"_ops": 0.719,
"_file_size": 74.239,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", FasterRCNN_ResNet50_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fasterrcnn_resnet50_fpn(
*,
weights: Optional[FasterRCNN_ResNet50_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> FasterRCNN:
"""
Faster R-CNN model with a ResNet-50-FPN backbone from the `Faster R-CNN: Towards Real-Time Object
Detection with Region Proposal Networks <https://arxiv.org/abs/1506.01497>`__
paper.
.. betastatus:: detection module
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and a targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
    Faster R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
Example::
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT)
>>> # For training
>>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
>>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]
>>> labels = torch.randint(1, 91, (4, 11))
>>> images = list(image for image in images)
>>> targets = []
>>> for i in range(len(images)):
>>> d = {}
>>> d['boxes'] = boxes[i]
>>> d['labels'] = labels[i]
>>> targets.append(d)
>>> output = model(images, targets)
>>> # For inference
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
Args:
weights (:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/faster_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights
:members:
"""
weights = FasterRCNN_ResNet50_FPN_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes=num_classes, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if weights == FasterRCNN_ResNet50_FPN_Weights.COCO_V1:
overwrite_eps(model, 0.0)
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", FasterRCNN_ResNet50_FPN_V2_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fasterrcnn_resnet50_fpn_v2(
*,
weights: Optional[FasterRCNN_ResNet50_FPN_V2_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = None,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> FasterRCNN:
"""
Constructs an improved Faster R-CNN model with a ResNet-50-FPN backbone from `Benchmarking Detection
Transfer Learning with Vision Transformers <https://arxiv.org/abs/2111.11429>`__ paper.
.. betastatus:: detection module
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Args:
weights (:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/faster_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights
:members:
"""
weights = FasterRCNN_ResNet50_FPN_V2_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
backbone = resnet50(weights=weights_backbone, progress=progress)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers, norm_layer=nn.BatchNorm2d)
rpn_anchor_generator = _default_anchorgen()
rpn_head = RPNHead(backbone.out_channels, rpn_anchor_generator.num_anchors_per_location()[0], conv_depth=2)
box_head = FastRCNNConvFCHead(
(backbone.out_channels, 7, 7), [256, 256, 256, 256], [1024], norm_layer=nn.BatchNorm2d
)
model = FasterRCNN(
backbone,
num_classes=num_classes,
rpn_anchor_generator=rpn_anchor_generator,
rpn_head=rpn_head,
box_head=box_head,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
def _fasterrcnn_mobilenet_v3_large_fpn(
*,
weights: Optional[Union[FasterRCNN_MobileNet_V3_Large_FPN_Weights, FasterRCNN_MobileNet_V3_Large_320_FPN_Weights]],
progress: bool,
num_classes: Optional[int],
weights_backbone: Optional[MobileNet_V3_Large_Weights],
trainable_backbone_layers: Optional[int],
**kwargs: Any,
) -> FasterRCNN:
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = mobilenet_v3_large(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers)
anchor_sizes = (
(
32,
64,
128,
256,
512,
),
) * 3
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
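# Anchor configuration sketch for the helper above: three pyramid levels (matching the
# ``* 3`` multiplier), each with the same five sizes and three aspect ratios, i.e. 15
# anchors per spatial location:
#
#   >>> sizes = ((32, 64, 128, 256, 512),) * 3
#   >>> ratios = ((0.5, 1.0, 2.0),) * len(sizes)
#   >>> AnchorGenerator(sizes, ratios).num_anchors_per_location()
#   [15, 15, 15]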
@register_model()
@handle_legacy_interface(
weights=("pretrained", FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def fasterrcnn_mobilenet_v3_large_320_fpn(
*,
weights: Optional[FasterRCNN_MobileNet_V3_Large_320_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> FasterRCNN:
"""
Low resolution Faster R-CNN model with a MobileNetV3-Large backbone tuned for mobile use cases.
.. betastatus:: detection module
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 6, with 6 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/faster_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights
:members:
"""
weights = FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.verify(weights)
weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)
defaults = {
"min_size": 320,
"max_size": 640,
"rpn_pre_nms_top_n_test": 150,
"rpn_post_nms_top_n_test": 150,
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights=weights,
progress=progress,
num_classes=num_classes,
weights_backbone=weights_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
@register_model()
@handle_legacy_interface(
weights=("pretrained", FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def fasterrcnn_mobilenet_v3_large_fpn(
*,
weights: Optional[FasterRCNN_MobileNet_V3_Large_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> FasterRCNN:
"""
Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.
.. betastatus:: detection module
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(weights=FasterRCNN_MobileNet_V3_Large_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 6, with 6 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/faster_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights
:members:
"""
weights = FasterRCNN_MobileNet_V3_Large_FPN_Weights.verify(weights)
weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)
defaults = {
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights=weights,
progress=progress,
num_classes=num_classes,
weights_backbone=weights_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
```
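A common follow-up to the constructors above is fine-tuning on a custom dataset by swapping the box predictor. The sketch below assumes a hypothetical two-class problem (one object class plus background); the attribute path mirrors how ``FasterRCNN`` wires the ``FastRCNNPredictor`` defined in this file.

```py
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

# Start from the COCO-pretrained detector.
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")

# Hypothetical custom dataset: 1 object class + background.
num_classes = 2

# Replace the classification/regression head with one sized for num_classes.
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
```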
|
============================================================================================================================
SOURCE CODE FILE: fcos.py
LINES: 1
SIZE: 34.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\fcos.py
ENCODING: utf-8
```py
import math
import warnings
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from ...ops import boxes as box_ops, generalized_box_iou_loss, misc as misc_nn_ops, sigmoid_focal_loss
from ...ops.feature_pyramid_network import LastLevelP6P7
from ...transforms._presets import ObjectDetection
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..resnet import resnet50, ResNet50_Weights
from . import _utils as det_utils
from .anchor_utils import AnchorGenerator
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from .transform import GeneralizedRCNNTransform
__all__ = [
"FCOS",
"FCOS_ResNet50_FPN_Weights",
"fcos_resnet50_fpn",
]
class FCOSHead(nn.Module):
"""
A regression and classification head for use in FCOS.
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
num_convs (Optional[int]): number of conv layer of head. Default: 4.
"""
__annotations__ = {
"box_coder": det_utils.BoxLinearCoder,
}
def __init__(self, in_channels: int, num_anchors: int, num_classes: int, num_convs: Optional[int] = 4) -> None:
super().__init__()
self.box_coder = det_utils.BoxLinearCoder(normalize_by_size=True)
self.classification_head = FCOSClassificationHead(in_channels, num_anchors, num_classes, num_convs)
self.regression_head = FCOSRegressionHead(in_channels, num_anchors, num_convs)
def compute_loss(
self,
targets: List[Dict[str, Tensor]],
head_outputs: Dict[str, Tensor],
anchors: List[Tensor],
matched_idxs: List[Tensor],
) -> Dict[str, Tensor]:
cls_logits = head_outputs["cls_logits"] # [N, HWA, C]
bbox_regression = head_outputs["bbox_regression"] # [N, HWA, 4]
bbox_ctrness = head_outputs["bbox_ctrness"] # [N, HWA, 1]
all_gt_classes_targets = []
all_gt_boxes_targets = []
for targets_per_image, matched_idxs_per_image in zip(targets, matched_idxs):
if len(targets_per_image["labels"]) == 0:
gt_classes_targets = targets_per_image["labels"].new_zeros((len(matched_idxs_per_image),))
gt_boxes_targets = targets_per_image["boxes"].new_zeros((len(matched_idxs_per_image), 4))
else:
gt_classes_targets = targets_per_image["labels"][matched_idxs_per_image.clip(min=0)]
gt_boxes_targets = targets_per_image["boxes"][matched_idxs_per_image.clip(min=0)]
gt_classes_targets[matched_idxs_per_image < 0] = -1 # background
all_gt_classes_targets.append(gt_classes_targets)
all_gt_boxes_targets.append(gt_boxes_targets)
# List[Tensor] to Tensor conversion of `all_gt_boxes_target`, `all_gt_classes_targets` and `anchors`
all_gt_boxes_targets, all_gt_classes_targets, anchors = (
torch.stack(all_gt_boxes_targets),
torch.stack(all_gt_classes_targets),
torch.stack(anchors),
)
        # compute foreground
        foreground_mask = all_gt_classes_targets >= 0
        num_foreground = foreground_mask.sum().item()
# classification loss
gt_classes_targets = torch.zeros_like(cls_logits)
        gt_classes_targets[foreground_mask, all_gt_classes_targets[foreground_mask]] = 1.0
loss_cls = sigmoid_focal_loss(cls_logits, gt_classes_targets, reduction="sum")
# amp issue: pred_boxes need to convert float
pred_boxes = self.box_coder.decode(bbox_regression, anchors)
# regression loss: GIoU loss
loss_bbox_reg = generalized_box_iou_loss(
            pred_boxes[foreground_mask],
            all_gt_boxes_targets[foreground_mask],
reduction="sum",
)
# ctrness loss
bbox_reg_targets = self.box_coder.encode(anchors, all_gt_boxes_targets)
if len(bbox_reg_targets) == 0:
gt_ctrness_targets = bbox_reg_targets.new_zeros(bbox_reg_targets.size()[:-1])
else:
left_right = bbox_reg_targets[:, :, [0, 2]]
top_bottom = bbox_reg_targets[:, :, [1, 3]]
gt_ctrness_targets = torch.sqrt(
(left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0])
* (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
)
pred_centerness = bbox_ctrness.squeeze(dim=2)
loss_bbox_ctrness = nn.functional.binary_cross_entropy_with_logits(
            pred_centerness[foreground_mask], gt_ctrness_targets[foreground_mask], reduction="sum"
)
return {
"classification": loss_cls / max(1, num_foreground),
"bbox_regression": loss_bbox_reg / max(1, num_foreground),
"bbox_ctrness": loss_bbox_ctrness / max(1, num_foreground),
}
def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:
cls_logits = self.classification_head(x)
bbox_regression, bbox_ctrness = self.regression_head(x)
return {
"cls_logits": cls_logits,
"bbox_regression": bbox_regression,
"bbox_ctrness": bbox_ctrness,
}
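# Shape sketch: with the default single anchor per location (A = 1), for feature maps of
# spatial sizes H_i x W_i the head returns a dict with
#   cls_logits:      [N, sum(H_i * W_i), num_classes]
#   bbox_regression: [N, sum(H_i * W_i), 4]
#   bbox_ctrness:    [N, sum(H_i * W_i), 1]
#
#   >>> head = FCOSHead(in_channels=256, num_anchors=1, num_classes=91)
#   >>> out = head([torch.rand(2, 256, 8, 8), torch.rand(2, 256, 4, 4)])
#   >>> out["cls_logits"].shape, out["bbox_regression"].shape, out["bbox_ctrness"].shape
#   (torch.Size([2, 80, 91]), torch.Size([2, 80, 4]), torch.Size([2, 80, 1]))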
class FCOSClassificationHead(nn.Module):
"""
A classification head for use in FCOS.
Args:
in_channels (int): number of channels of the input feature.
num_anchors (int): number of anchors to be predicted.
num_classes (int): number of classes to be predicted.
num_convs (Optional[int]): number of conv layer. Default: 4.
prior_probability (Optional[float]): probability of prior. Default: 0.01.
norm_layer: Module specifying the normalization layer to use.
"""
def __init__(
self,
in_channels: int,
num_anchors: int,
num_classes: int,
num_convs: int = 4,
prior_probability: float = 0.01,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
if norm_layer is None:
norm_layer = partial(nn.GroupNorm, 32)
conv = []
for _ in range(num_convs):
conv.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
conv.append(norm_layer(in_channels))
conv.append(nn.ReLU())
self.conv = nn.Sequential(*conv)
for layer in self.conv.children():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, std=0.01)
torch.nn.init.constant_(layer.bias, 0)
self.cls_logits = nn.Conv2d(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
torch.nn.init.constant_(self.cls_logits.bias, -math.log((1 - prior_probability) / prior_probability))
def forward(self, x: List[Tensor]) -> Tensor:
all_cls_logits = []
for features in x:
cls_logits = self.conv(features)
cls_logits = self.cls_logits(cls_logits)
# Permute classification output from (N, A * K, H, W) to (N, HWA, K).
N, _, H, W = cls_logits.shape
cls_logits = cls_logits.view(N, -1, self.num_classes, H, W)
cls_logits = cls_logits.permute(0, 3, 4, 1, 2)
            cls_logits = cls_logits.reshape(N, -1, self.num_classes)  # Size=(N, HWA, K)
all_cls_logits.append(cls_logits)
return torch.cat(all_cls_logits, dim=1)
class FCOSRegressionHead(nn.Module):
"""
    A regression head for use in FCOS, which combines the regression branch and the center-ness branch.
    This combination can yield better performance.
Reference: `FCOS: A simple and strong anchor-free object detector <https://arxiv.org/abs/2006.09214>`_.
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_convs (Optional[int]): number of conv layer. Default: 4.
norm_layer: Module specifying the normalization layer to use.
"""
def __init__(
self,
in_channels: int,
num_anchors: int,
num_convs: int = 4,
norm_layer: Optional[Callable[..., nn.Module]] = None,
):
super().__init__()
if norm_layer is None:
norm_layer = partial(nn.GroupNorm, 32)
conv = []
for _ in range(num_convs):
conv.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
conv.append(norm_layer(in_channels))
conv.append(nn.ReLU())
self.conv = nn.Sequential(*conv)
self.bbox_reg = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)
self.bbox_ctrness = nn.Conv2d(in_channels, num_anchors * 1, kernel_size=3, stride=1, padding=1)
for layer in [self.bbox_reg, self.bbox_ctrness]:
torch.nn.init.normal_(layer.weight, std=0.01)
torch.nn.init.zeros_(layer.bias)
for layer in self.conv.children():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, std=0.01)
torch.nn.init.zeros_(layer.bias)
def forward(self, x: List[Tensor]) -> Tuple[Tensor, Tensor]:
all_bbox_regression = []
all_bbox_ctrness = []
for features in x:
bbox_feature = self.conv(features)
bbox_regression = nn.functional.relu(self.bbox_reg(bbox_feature))
bbox_ctrness = self.bbox_ctrness(bbox_feature)
# permute bbox regression output from (N, 4 * A, H, W) to (N, HWA, 4).
N, _, H, W = bbox_regression.shape
bbox_regression = bbox_regression.view(N, -1, 4, H, W)
bbox_regression = bbox_regression.permute(0, 3, 4, 1, 2)
bbox_regression = bbox_regression.reshape(N, -1, 4) # Size=(N, HWA, 4)
all_bbox_regression.append(bbox_regression)
# permute bbox ctrness output from (N, 1 * A, H, W) to (N, HWA, 1).
bbox_ctrness = bbox_ctrness.view(N, -1, 1, H, W)
bbox_ctrness = bbox_ctrness.permute(0, 3, 4, 1, 2)
bbox_ctrness = bbox_ctrness.reshape(N, -1, 1)
all_bbox_ctrness.append(bbox_ctrness)
return torch.cat(all_bbox_regression, dim=1), torch.cat(all_bbox_ctrness, dim=1)
class FCOS(nn.Module):
"""
Implements FCOS.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification, regression
and centerness losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
min_size (int): Images are rescaled before feeding them to the backbone:
we attempt to preserve the aspect ratio and scale the shorter edge
to ``min_size``. If the resulting longer edge exceeds ``max_size``,
then downscale so that the longer edge does not exceed ``max_size``.
            This may result in the shorter edge being lower than ``min_size``.
max_size (int): See ``min_size``.
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been
            trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
        anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps. For FCOS, only one anchor is set per position of each level, with width and height
            equal to the stride of the feature map and aspect ratio 1.0, so the anchor center is
            equivalent to the point in the FCOS paper.
head (nn.Module): Module run on top of the feature pyramid.
Defaults to a module containing a classification and regression module.
        center_sampling_radius (float): radius of the "center" of a ground-truth box,
            within which all anchor points are labeled positive.
score_thresh (float): Score threshold used for postprocessing the detections.
nms_thresh (float): NMS threshold used for postprocessing the detections.
detections_per_img (int): Number of best detections to keep after NMS.
topk_candidates (int): Number of best detections to keep before NMS.
Example:
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import FCOS
>>> from torchvision.models.detection.anchor_utils import AnchorGenerator
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
>>> # FCOS needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280,
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the network generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(
>>> sizes=((8,), (16,), (32,), (64,), (128,)),
>>> aspect_ratios=((1.0,),)
>>> )
>>>
>>> # put the pieces together inside a FCOS model
>>> model = FCOS(
>>> backbone,
>>> num_classes=80,
>>> anchor_generator=anchor_generator,
>>> )
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
__annotations__ = {
"box_coder": det_utils.BoxLinearCoder,
}
def __init__(
self,
backbone: nn.Module,
num_classes: int,
# transform parameters
min_size: int = 800,
max_size: int = 1333,
image_mean: Optional[List[float]] = None,
image_std: Optional[List[float]] = None,
# Anchor parameters
anchor_generator: Optional[AnchorGenerator] = None,
head: Optional[nn.Module] = None,
center_sampling_radius: float = 1.5,
score_thresh: float = 0.2,
nms_thresh: float = 0.6,
detections_per_img: int = 100,
topk_candidates: int = 1000,
**kwargs,
):
super().__init__()
_log_api_usage_once(self)
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
self.backbone = backbone
if not isinstance(anchor_generator, (AnchorGenerator, type(None))):
raise TypeError(
f"anchor_generator should be of type AnchorGenerator or None, instead got {type(anchor_generator)}"
)
if anchor_generator is None:
anchor_sizes = ((8,), (16,), (32,), (64,), (128,)) # equal to strides of multi-level feature map
aspect_ratios = ((1.0,),) * len(anchor_sizes) # set only one anchor
anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
self.anchor_generator = anchor_generator
if self.anchor_generator.num_anchors_per_location()[0] != 1:
raise ValueError(
f"anchor_generator.num_anchors_per_location()[0] should be 1 instead of {anchor_generator.num_anchors_per_location()[0]}"
)
if head is None:
head = FCOSHead(backbone.out_channels, anchor_generator.num_anchors_per_location()[0], num_classes)
self.head = head
self.box_coder = det_utils.BoxLinearCoder(normalize_by_size=True)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std, **kwargs)
self.center_sampling_radius = center_sampling_radius
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.detections_per_img = detections_per_img
self.topk_candidates = topk_candidates
# used only on torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(
self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]]
) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
if self.training:
return losses
return detections
def compute_loss(
self,
targets: List[Dict[str, Tensor]],
head_outputs: Dict[str, Tensor],
anchors: List[Tensor],
num_anchors_per_level: List[int],
) -> Dict[str, Tensor]:
matched_idxs = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
if targets_per_image["boxes"].numel() == 0:
matched_idxs.append(
torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)
)
continue
gt_boxes = targets_per_image["boxes"]
gt_centers = (gt_boxes[:, :2] + gt_boxes[:, 2:]) / 2 # Nx2
anchor_centers = (anchors_per_image[:, :2] + anchors_per_image[:, 2:]) / 2 # N
anchor_sizes = anchors_per_image[:, 2] - anchors_per_image[:, 0]
# center sampling: anchor point must be close enough to gt center.
pairwise_match = (anchor_centers[:, None, :] - gt_centers[None, :, :]).abs_().max(
dim=2
).values < self.center_sampling_radius * anchor_sizes[:, None]
# compute pairwise distance between N points and M boxes
x, y = anchor_centers.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
x0, y0, x1, y1 = gt_boxes.unsqueeze(dim=0).unbind(dim=2) # (1, M)
pairwise_dist = torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) # (N, M)
# anchor point must be inside gt
pairwise_match &= pairwise_dist.min(dim=2).values > 0
# each anchor is only responsible for certain scale range.
lower_bound = anchor_sizes * 4
lower_bound[: num_anchors_per_level[0]] = 0
upper_bound = anchor_sizes * 8
upper_bound[-num_anchors_per_level[-1] :] = float("inf")
pairwise_dist = pairwise_dist.max(dim=2).values
pairwise_match &= (pairwise_dist > lower_bound[:, None]) & (pairwise_dist < upper_bound[:, None])
# match the GT box with minimum area, if there are multiple GT matches
gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1]) # N
pairwise_match = pairwise_match.to(torch.float32) * (1e8 - gt_areas[None, :])
min_values, matched_idx = pairwise_match.max(dim=1) # R, per-anchor match
matched_idx[min_values < 1e-5] = -1 # unmatched anchors are assigned -1
matched_idxs.append(matched_idx)
return self.head.compute_loss(targets, head_outputs, anchors, matched_idxs)
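    # In short, the matching above assigns an anchor point to a ground-truth box only if it
    # (1) lies within center_sampling_radius * stride of the box center, (2) falls inside the
    # box, and (3) has its largest regression distance within the level's scale range
    # (roughly [4 * stride, 8 * stride]); ties are resolved in favor of the smallest box.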
def postprocess_detections(
self, head_outputs: Dict[str, List[Tensor]], anchors: List[List[Tensor]], image_shapes: List[Tuple[int, int]]
) -> List[Dict[str, Tensor]]:
class_logits = head_outputs["cls_logits"]
box_regression = head_outputs["bbox_regression"]
box_ctrness = head_outputs["bbox_ctrness"]
num_images = len(image_shapes)
detections: List[Dict[str, Tensor]] = []
for index in range(num_images):
box_regression_per_image = [br[index] for br in box_regression]
logits_per_image = [cl[index] for cl in class_logits]
box_ctrness_per_image = [bc[index] for bc in box_ctrness]
anchors_per_image, image_shape = anchors[index], image_shapes[index]
image_boxes = []
image_scores = []
image_labels = []
for box_regression_per_level, logits_per_level, box_ctrness_per_level, anchors_per_level in zip(
box_regression_per_image, logits_per_image, box_ctrness_per_image, anchors_per_image
):
num_classes = logits_per_level.shape[-1]
# remove low scoring boxes
scores_per_level = torch.sqrt(
torch.sigmoid(logits_per_level) * torch.sigmoid(box_ctrness_per_level)
).flatten()
keep_idxs = scores_per_level > self.score_thresh
scores_per_level = scores_per_level[keep_idxs]
topk_idxs = torch.where(keep_idxs)[0]
# keep only topk scoring predictions
num_topk = det_utils._topk_min(topk_idxs, self.topk_candidates, 0)
scores_per_level, idxs = scores_per_level.topk(num_topk)
topk_idxs = topk_idxs[idxs]
anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode="floor")
labels_per_level = topk_idxs % num_classes
boxes_per_level = self.box_coder.decode(
box_regression_per_level[anchor_idxs], anchors_per_level[anchor_idxs]
)
boxes_per_level = box_ops.clip_boxes_to_image(boxes_per_level, image_shape)
image_boxes.append(boxes_per_level)
image_scores.append(scores_per_level)
image_labels.append(labels_per_level)
image_boxes = torch.cat(image_boxes, dim=0)
image_scores = torch.cat(image_scores, dim=0)
image_labels = torch.cat(image_labels, dim=0)
# non-maximum suppression
keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh)
keep = keep[: self.detections_per_img]
detections.append(
{
"boxes": image_boxes[keep],
"scores": image_scores[keep],
"labels": image_labels[keep],
}
)
return detections
def forward(
self,
images: List[Tensor],
targets: Optional[List[Dict[str, Tensor]]] = None,
) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
"""
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
)
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
# transform the input
images, targets = self.transform(images, targets)
# Check for degenerate boxes
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
torch._assert(
False,
f"All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}.",
)
# get the features from the backbone
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
features = list(features.values())
# compute the fcos heads outputs using the features
head_outputs = self.head(features)
# create the set of anchors
anchors = self.anchor_generator(images, features)
# recover level sizes
num_anchors_per_level = [x.size(2) * x.size(3) for x in features]
losses = {}
detections: List[Dict[str, Tensor]] = []
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
# compute the losses
losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
else:
# split outputs per level
split_head_outputs: Dict[str, List[Tensor]] = {}
for k in head_outputs:
split_head_outputs[k] = list(head_outputs[k].split(num_anchors_per_level, dim=1))
split_anchors = [list(a.split(num_anchors_per_level)) for a in anchors]
# compute the detections
detections = self.postprocess_detections(split_head_outputs, split_anchors, images.image_sizes)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("FCOS always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
return self.eager_outputs(losses, detections)
class FCOS_ResNet50_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/fcos_resnet50_fpn_coco-99b0c9b7.pth",
transforms=ObjectDetection,
meta={
"num_params": 32269600,
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#fcos-resnet-50-fpn",
"_metrics": {
"COCO-val2017": {
"box_map": 39.2,
}
},
"_ops": 128.207,
"_file_size": 123.608,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", FCOS_ResNet50_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fcos_resnet50_fpn(
*,
weights: Optional[FCOS_ResNet50_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> FCOS:
"""
Constructs a FCOS model with a ResNet-50-FPN backbone.
.. betastatus:: detection module
Reference: `FCOS: Fully Convolutional One-Stage Object Detection <https://arxiv.org/abs/1904.01355>`_.
`FCOS: A simple and strong anchor-free object detector <https://arxiv.org/abs/2006.09214>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
Example:
>>> model = torchvision.models.detection.fcos_resnet50_fpn(weights=FCOS_ResNet50_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.FCOS_ResNet50_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.FCOS_ResNet50_FPN_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for
the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) resnet layers starting
from final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3. Default: None
**kwargs: parameters passed to the ``torchvision.models.detection.FCOS``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/fcos.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.FCOS_ResNet50_FPN_Weights
:members:
"""
weights = FCOS_ResNet50_FPN_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(
backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(256, 256)
)
model = FCOS(backbone, num_classes, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
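As a quick check of the training path implemented above, the sketch below builds an untrained FCOS with an arbitrary class count and runs one forward pass with a dummy target; the loss keys come from ``FCOSHead.compute_loss``. The box coordinates and labels are placeholders.

```py
import torch
import torchvision

model = torchvision.models.detection.fcos_resnet50_fpn(
    weights=None, weights_backbone=None, num_classes=3
)
model.train()

images = [torch.rand(3, 320, 320)]
targets = [{"boxes": torch.tensor([[20.0, 30.0, 200.0, 250.0]]),
            "labels": torch.tensor([1])}]

losses = model(images, targets)
print(sorted(losses))  # ['bbox_ctrness', 'bbox_regression', 'classification']
```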
|
========================================================================================================================================
SOURCE CODE FILE: generalized_rcnn.py
LINES: 1
SIZE: 4.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\generalized_rcnn.py
ENCODING: utf-8
```py
"""
Implements the Generalized R-CNN framework
"""
import warnings
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
from ...utils import _log_api_usage_once
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN.
Args:
backbone (nn.Module):
rpn (nn.Module):
roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
detections / masks from it.
transform (nn.Module): performs the data transformation from the inputs to feed into
the model
"""
def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
super().__init__()
_log_api_usage_once(self)
self.transform = transform
self.backbone = backbone
self.rpn = rpn
self.roi_heads = roi_heads
# used only on torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(self, losses, detections):
# type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]]
if self.training:
return losses
return detections
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[str, Tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
)
else:
torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
images, targets = self.transform(images, targets)
# Check for degenerate boxes
# TODO: Move this to a function
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
torch._assert(
False,
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}.",
)
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
proposals, proposal_losses = self.rpn(images, features, targets)
detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) # type: ignore[operator]
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
else:
return self.eager_outputs(losses, detections)
```
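``GeneralizedRCNN`` is only a composition layer; concrete detectors such as Faster R-CNN supply the four building blocks. A small sketch of inspecting them on a built model (the printed class names reflect the current torchvision implementation and are stated here as an assumption):

```py
import torchvision

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    weights=None, weights_backbone=None
)

# The four GeneralizedRCNN components are ordinary attributes of the model.
print(type(model.transform).__name__)  # GeneralizedRCNNTransform
print(type(model.backbone).__name__)   # BackboneWithFPN
print(type(model.rpn).__name__)        # RegionProposalNetwork
print(type(model.roi_heads).__name__)  # RoIHeads
```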
|
==================================================================================================================================
SOURCE CODE FILE: image_list.py
LINES: 1
SIZE: 0.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\image_list.py
ENCODING: utf-8
```py
from typing import List, Tuple
import torch
from torch import Tensor
class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
    and storing the original size of each image in a field.
Args:
tensors (tensor): Tensor containing images.
image_sizes (list[tuple[int, int]]): List of Tuples each containing size of images.
"""
def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]) -> None:
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, device: torch.device) -> "ImageList":
cast_tensor = self.tensors.to(device)
return ImageList(cast_tensor, self.image_sizes)
```
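A minimal usage sketch of ``ImageList``: the tensor holds the padded batch while ``image_sizes`` records each image's pre-padding size, which the detection transform later uses to map predictions back to the original resolution.

```py
import torch
from torchvision.models.detection.image_list import ImageList

padded = torch.zeros(2, 3, 512, 512)        # batch padded to a common size
original_sizes = [(480, 512), (512, 384)]   # (H, W) of each image before padding
image_list = ImageList(padded, original_sizes)

moved = image_list.to(torch.device("cpu"))  # .to() returns a new ImageList
print(moved.image_sizes)                    # [(480, 512), (512, 384)]
```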
|
=====================================================================================================================================
SOURCE CODE FILE: keypoint_rcnn.py
LINES: 1
SIZE: 21.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\keypoint_rcnn.py
ENCODING: utf-8
```py
from typing import Any, Optional
import torch
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ...ops import misc as misc_nn_ops
from ...transforms._presets import ObjectDetection
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_PERSON_CATEGORIES, _COCO_PERSON_KEYPOINT_NAMES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..resnet import resnet50, ResNet50_Weights
from ._utils import overwrite_eps
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from .faster_rcnn import FasterRCNN
__all__ = [
"KeypointRCNN",
"KeypointRCNN_ResNet50_FPN_Weights",
"keypointrcnn_resnet50_fpn",
]
class KeypointRCNN(FasterRCNN):
"""
Implements Keypoint R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
        - keypoints (FloatTensor[N, K, 3]): the K keypoint locations for each of the N instances, in the
format [x, y, visibility], where visibility=0 means that the keypoint is not visible.
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN, and the keypoint loss.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
- keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): Images are rescaled before feeding them to the backbone:
we attempt to preserve the aspect ratio and scale the shorter edge
to ``min_size``. If the resulting longer edge exceeds ``max_size``,
then downscale so that the longer edge does not exceed ``max_size``.
            This may result in the shorter edge being lower than ``min_size``.
max_size (int): See ``min_size``.
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been
            trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
rpn_score_thresh (float): only return proposals with an objectness score greater than rpn_score_thresh
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes, which will be used for the keypoint head.
keypoint_head (nn.Module): module that takes the cropped feature maps as input
keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the
heatmap logits
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import KeypointRCNN
>>> from torchvision.models.detection.anchor_utils import AnchorGenerator
>>>
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
>>> # KeypointRCNN needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280,
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
>>> # let's define what are the feature maps that we will
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=14,
>>> sampling_ratio=2)
>>> # put the pieces together inside a KeypointRCNN model
>>> model = KeypointRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler,
>>> keypoint_roi_pool=keypoint_roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=None,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
# keypoint parameters
keypoint_roi_pool=None,
keypoint_head=None,
keypoint_predictor=None,
num_keypoints=None,
**kwargs,
):
if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))):
raise TypeError(
"keypoint_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(keypoint_roi_pool)}"
)
if min_size is None:
min_size = (640, 672, 704, 736, 768, 800)
if num_keypoints is not None:
if keypoint_predictor is not None:
raise ValueError("num_keypoints should be None when keypoint_predictor is specified")
else:
num_keypoints = 17
out_channels = backbone.out_channels
if keypoint_roi_pool is None:
keypoint_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2)
if keypoint_head is None:
keypoint_layers = tuple(512 for _ in range(8))
keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)
if keypoint_predictor is None:
keypoint_dim_reduced = 512 # == keypoint_layers[-1]
keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)
super().__init__(
backbone,
num_classes,
# transform parameters
min_size,
max_size,
image_mean,
image_std,
# RPN-specific parameters
rpn_anchor_generator,
rpn_head,
rpn_pre_nms_top_n_train,
rpn_pre_nms_top_n_test,
rpn_post_nms_top_n_train,
rpn_post_nms_top_n_test,
rpn_nms_thresh,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_score_thresh,
# Box parameters
box_roi_pool,
box_head,
box_predictor,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
**kwargs,
)
self.roi_heads.keypoint_roi_pool = keypoint_roi_pool
self.roi_heads.keypoint_head = keypoint_head
self.roi_heads.keypoint_predictor = keypoint_predictor
class KeypointRCNNHeads(nn.Sequential):
def __init__(self, in_channels, layers):
d = []
next_feature = in_channels
for out_channels in layers:
d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
d.append(nn.ReLU(inplace=True))
next_feature = out_channels
super().__init__(*d)
for m in self.children():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(m.bias, 0)
class KeypointRCNNPredictor(nn.Module):
def __init__(self, in_channels, num_keypoints):
super().__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
input_features,
num_keypoints,
deconv_kernel,
stride=2,
padding=deconv_kernel // 2 - 1,
)
nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(self.kps_score_lowres.bias, 0)
self.up_scale = 2
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
return torch.nn.functional.interpolate(
x, scale_factor=float(self.up_scale), mode="bilinear", align_corners=False, recompute_scale_factor=False
)
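# Shape sketch for the default keypoint branch (illustrative only, hypothetical inputs):
# with 14x14 RoI features the ConvTranspose2d above (kernel 4, stride 2, padding 1)
# roughly doubles the spatial size and the bilinear upscale doubles it again, e.g.
#
#     >>> head = KeypointRCNNHeads(256, tuple(512 for _ in range(8)))
#     >>> predictor = KeypointRCNNPredictor(512, 17)
#     >>> feats = torch.rand(2, 256, 14, 14)      # hypothetical RoI-pooled features
#     >>> predictor(head(feats)).shape
#     torch.Size([2, 17, 56, 56])                 # one heatmap per keypoint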
_COMMON_META = {
"categories": _COCO_PERSON_CATEGORIES,
"keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
"min_size": (1, 1),
}
class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
COCO_LEGACY = Weights(
url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 59137258,
"recipe": "https://github.com/pytorch/vision/issues/1606",
"_metrics": {
"COCO-val2017": {
"box_map": 50.6,
"kp_map": 61.1,
}
},
"_ops": 133.924,
"_file_size": 226.054,
"_docs": """
                These weights were produced by following a training recipe similar to the one in the paper, but use a
                checkpoint from an early epoch.
""",
},
)
COCO_V1 = Weights(
url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 59137258,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#keypoint-r-cnn",
"_metrics": {
"COCO-val2017": {
"box_map": 54.6,
"kp_map": 65.0,
}
},
"_ops": 137.42,
"_file_size": 226.054,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY
if kwargs["pretrained"] == "legacy"
else KeypointRCNN_ResNet50_FPN_Weights.COCO_V1,
),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def keypointrcnn_resnet50_fpn(
*,
weights: Optional[KeypointRCNN_ResNet50_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
num_keypoints: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> KeypointRCNN:
"""
Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.
.. betastatus:: detection module
Reference: `Mask R-CNN <https://arxiv.org/abs/1703.06870>`__.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
- keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the
format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN, and the keypoint loss.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detected instances:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each instance
        - scores (``Tensor[N]``): the scores for each instance
- keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.
For more details on the output, you may refer to :ref:`instance_seg_output`.
    Keypoint R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
Example::
>>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=KeypointRCNN_ResNet50_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)
Args:
weights (:class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int, optional): number of output classes of the model (including the background)
num_keypoints (int, optional): number of keypoints
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
.. autoclass:: torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights
:members:
"""
weights = KeypointRCNN_ResNet50_FPN_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
num_keypoints = _ovewrite_value_param("num_keypoints", num_keypoints, len(weights.meta["keypoint_names"]))
else:
if num_classes is None:
num_classes = 2
if num_keypoints is None:
num_keypoints = 17
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if weights == KeypointRCNN_ResNet50_FPN_Weights.COCO_V1:
overwrite_eps(model, 0.0)
return model
```
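Since the docstring example above only demonstrates inference, the following is a minimal training-step sketch showing the target format Keypoint R-CNN expects. All boxes, labels and keypoint coordinates are hypothetical placeholder values, and no pretrained weights are loaded:

```py
import torch
from torchvision.models.detection import keypointrcnn_resnet50_fpn

# Untrained model with the default 2 classes and 17 keypoints.
model = keypointrcnn_resnet50_fpn(weights=None, weights_backbone=None)
model.train()

images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
targets = []
for _ in images:
    keypoints = torch.rand(1, 17, 3)       # one instance, [x, y, visibility] per keypoint
    keypoints[..., :2] = keypoints[..., :2] * 150.0 + 50.0  # keep points inside the box below
    keypoints[..., 2] = 1.0                # mark every keypoint as visible
    targets.append(
        {
            "boxes": torch.tensor([[50.0, 50.0, 200.0, 220.0]]),
            "labels": torch.tensor([1], dtype=torch.int64),
            "keypoints": keypoints,
        }
    )

loss_dict = model(images, targets)         # RPN, box and keypoint losses
total_loss = sum(loss_dict.values())
total_loss.backward()
```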
|
=================================================================================================================================
SOURCE CODE FILE: mask_rcnn.py
LINES: 1
SIZE: 26.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\mask_rcnn.py
ENCODING: utf-8
```py
from collections import OrderedDict
from typing import Any, Callable, Optional
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ...ops import misc as misc_nn_ops
from ...transforms._presets import ObjectDetection
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..resnet import resnet50, ResNet50_Weights
from ._utils import overwrite_eps
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from .faster_rcnn import _default_anchorgen, FasterRCNN, FastRCNNConvFCHead, RPNHead
__all__ = [
"MaskRCNN",
"MaskRCNN_ResNet50_FPN_Weights",
"MaskRCNN_ResNet50_FPN_V2_Weights",
"maskrcnn_resnet50_fpn",
"maskrcnn_resnet50_fpn_v2",
]
class MaskRCNN(FasterRCNN):
"""
Implements Mask R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
- masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN, and the mask loss.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores for each prediction
- masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
obtain the final segmentation masks, the soft masks can be thresholded, generally
with a value of 0.5 (mask >= 0.5)
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): Images are rescaled before feeding them to the backbone:
we attempt to preserve the aspect ratio and scale the shorter edge
to ``min_size``. If the resulting longer edge exceeds ``max_size``,
then downscale so that the longer edge does not exceed ``max_size``.
            This may result in the shorter edge being lower than ``min_size``.
max_size (int): See ``min_size``.
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
rpn_score_thresh (float): only return proposals with an objectness score greater than rpn_score_thresh
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes, which will be used for the mask head.
mask_head (nn.Module): module that takes the cropped feature maps as input
mask_predictor (nn.Module): module that takes the output of the mask_head and returns the
segmentation mask logits
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import MaskRCNN
>>> from torchvision.models.detection.anchor_utils import AnchorGenerator
>>>
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
>>> # MaskRCNN needs to know the number of
        >>> # output channels in a backbone. For mobilenet_v2, it's 1280,
        >>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
>>> # let's define what are the feature maps that we will
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=14,
>>> sampling_ratio=2)
>>> # put the pieces together inside a MaskRCNN model
>>> model = MaskRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler,
>>> mask_roi_pool=mask_roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
# Mask parameters
mask_roi_pool=None,
mask_head=None,
mask_predictor=None,
**kwargs,
):
if not isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))):
raise TypeError(
f"mask_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(mask_roi_pool)}"
)
if num_classes is not None:
if mask_predictor is not None:
raise ValueError("num_classes should be None when mask_predictor is specified")
out_channels = backbone.out_channels
if mask_roi_pool is None:
mask_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2)
if mask_head is None:
mask_layers = (256, 256, 256, 256)
mask_dilation = 1
mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)
if mask_predictor is None:
mask_predictor_in_channels = 256 # == mask_layers[-1]
mask_dim_reduced = 256
mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes)
super().__init__(
backbone,
num_classes,
# transform parameters
min_size,
max_size,
image_mean,
image_std,
# RPN-specific parameters
rpn_anchor_generator,
rpn_head,
rpn_pre_nms_top_n_train,
rpn_pre_nms_top_n_test,
rpn_post_nms_top_n_train,
rpn_post_nms_top_n_test,
rpn_nms_thresh,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_score_thresh,
# Box parameters
box_roi_pool,
box_head,
box_predictor,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
**kwargs,
)
self.roi_heads.mask_roi_pool = mask_roi_pool
self.roi_heads.mask_head = mask_head
self.roi_heads.mask_predictor = mask_predictor
class MaskRCNNHeads(nn.Sequential):
_version = 2
def __init__(self, in_channels, layers, dilation, norm_layer: Optional[Callable[..., nn.Module]] = None):
"""
Args:
in_channels (int): number of input channels
layers (list): feature dimensions of each FCN layer
dilation (int): dilation rate of kernel
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
"""
blocks = []
next_feature = in_channels
for layer_features in layers:
blocks.append(
misc_nn_ops.Conv2dNormActivation(
next_feature,
layer_features,
kernel_size=3,
stride=1,
padding=dilation,
dilation=dilation,
norm_layer=norm_layer,
)
)
next_feature = layer_features
super().__init__(*blocks)
for layer in self.modules():
if isinstance(layer, nn.Conv2d):
nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu")
if layer.bias is not None:
nn.init.zeros_(layer.bias)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
num_blocks = len(self)
for i in range(num_blocks):
for type in ["weight", "bias"]:
old_key = f"{prefix}mask_fcn{i+1}.{type}"
new_key = f"{prefix}{i}.0.{type}"
if old_key in state_dict:
state_dict[new_key] = state_dict.pop(old_key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
class MaskRCNNPredictor(nn.Sequential):
def __init__(self, in_channels, dim_reduced, num_classes):
super().__init__(
OrderedDict(
[
("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
("relu", nn.ReLU(inplace=True)),
("mask_fcn_logits", nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
]
)
)
for name, param in self.named_parameters():
if "weight" in name:
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
# elif "bias" in name:
# nn.init.constant_(param, 0)
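# Shape sketch for the default mask branch (illustrative only, hypothetical inputs):
# 14x14 RoI features keep their spatial size through MaskRCNNHeads (3x3 convs, padding 1),
# are upsampled to 28x28 by the ConvTranspose2d above, and are then mapped to one mask
# logit map per class, e.g.
#
#     >>> head = MaskRCNNHeads(256, (256, 256, 256, 256), 1)
#     >>> predictor = MaskRCNNPredictor(256, 256, 91)
#     >>> feats = torch.rand(2, 256, 14, 14)      # hypothetical RoI-pooled features
#     >>> predictor(head(feats)).shape
#     torch.Size([2, 91, 28, 28])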
_COMMON_META = {
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
}
class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 44401393,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#mask-r-cnn",
"_metrics": {
"COCO-val2017": {
"box_map": 37.9,
"mask_map": 34.6,
}
},
"_ops": 134.38,
"_file_size": 169.84,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_v2_coco-73cbd019.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 46359409,
"recipe": "https://github.com/pytorch/vision/pull/5773",
"_metrics": {
"COCO-val2017": {
"box_map": 47.4,
"mask_map": 41.8,
}
},
"_ops": 333.577,
"_file_size": 177.219,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", MaskRCNN_ResNet50_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def maskrcnn_resnet50_fpn(
*,
weights: Optional[MaskRCNN_ResNet50_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> MaskRCNN:
"""Mask R-CNN model with a ResNet-50-FPN backbone from the `Mask R-CNN
<https://arxiv.org/abs/1703.06870>`_ paper.
.. betastatus:: detection module
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
- masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN, and the mask loss.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detected instances:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each instance
        - scores (``Tensor[N]``): the scores for each instance
- masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to
obtain the final segmentation masks, the soft masks can be thresholded, generally
with a value of 0.5 (``mask >= 0.5``)
For more details on the output and on how to plot the masks, you may refer to :ref:`instance_seg_output`.
    Mask R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
Example::
>>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=MaskRCNN_ResNet50_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)
Args:
weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/mask_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights
:members:
"""
weights = MaskRCNN_ResNet50_FPN_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = MaskRCNN(backbone, num_classes=num_classes, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if weights == MaskRCNN_ResNet50_FPN_Weights.COCO_V1:
overwrite_eps(model, 0.0)
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def maskrcnn_resnet50_fpn_v2(
*,
weights: Optional[MaskRCNN_ResNet50_FPN_V2_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = None,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> MaskRCNN:
"""Improved Mask R-CNN model with a ResNet-50-FPN backbone from the `Benchmarking Detection Transfer
Learning with Vision Transformers <https://arxiv.org/abs/2111.11429>`_ paper.
.. betastatus:: detection module
    See :func:`~torchvision.models.detection.maskrcnn_resnet50_fpn` for more details.
Args:
weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
trainable. If ``None`` is passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/mask_rcnn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights
:members:
"""
weights = MaskRCNN_ResNet50_FPN_V2_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
backbone = resnet50(weights=weights_backbone, progress=progress)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers, norm_layer=nn.BatchNorm2d)
rpn_anchor_generator = _default_anchorgen()
rpn_head = RPNHead(backbone.out_channels, rpn_anchor_generator.num_anchors_per_location()[0], conv_depth=2)
box_head = FastRCNNConvFCHead(
(backbone.out_channels, 7, 7), [256, 256, 256, 256], [1024], norm_layer=nn.BatchNorm2d
)
mask_head = MaskRCNNHeads(backbone.out_channels, [256, 256, 256, 256], 1, norm_layer=nn.BatchNorm2d)
model = MaskRCNN(
backbone,
num_classes=num_classes,
rpn_anchor_generator=rpn_anchor_generator,
rpn_head=rpn_head,
box_head=box_head,
mask_head=mask_head,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
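Complementing the docstrings above, here is a short inference sketch that keeps confident detections and binarizes the returned soft masks at 0.5, as the model description suggests. The 0.5 score cut-off is an arbitrary choice for illustration, and running this requires downloading the COCO weights:

```py
import torch
from torchvision.models.detection import (
    MaskRCNN_ResNet50_FPN_Weights,
    maskrcnn_resnet50_fpn,
)

weights = MaskRCNN_ResNet50_FPN_Weights.DEFAULT
model = maskrcnn_resnet50_fpn(weights=weights)
model.eval()

images = [torch.rand(3, 300, 400)]            # replace with real images in 0-1 range
with torch.no_grad():
    predictions = model(images)

pred = predictions[0]
keep = pred["scores"] > 0.5                   # hypothetical confidence threshold
binary_masks = pred["masks"][keep] >= 0.5     # [K, 1, H, W] boolean masks
print(pred["boxes"][keep].shape, binary_masks.shape)
```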
|
=================================================================================================================================
SOURCE CODE FILE: retinanet.py
LINES: 1
SIZE: 37.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\retinanet.py
ENCODING: utf-8
```py
import math
import warnings
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from ...ops import boxes as box_ops, misc as misc_nn_ops, sigmoid_focal_loss
from ...ops.feature_pyramid_network import LastLevelP6P7
from ...transforms._presets import ObjectDetection
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..resnet import resnet50, ResNet50_Weights
from . import _utils as det_utils
from ._utils import _box_loss, overwrite_eps
from .anchor_utils import AnchorGenerator
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from .transform import GeneralizedRCNNTransform
__all__ = [
"RetinaNet",
"RetinaNet_ResNet50_FPN_Weights",
"RetinaNet_ResNet50_FPN_V2_Weights",
"retinanet_resnet50_fpn",
"retinanet_resnet50_fpn_v2",
]
def _sum(x: List[Tensor]) -> Tensor:
res = x[0]
for i in x[1:]:
res = res + i
return res
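# In version 2 of the heads each plain conv + ReLU pair was replaced by a single
# Conv2dNormActivation block, so a conv that used to live at index 2*i of the nn.Sequential
# now lives at index i (with the conv itself at sub-index 0). The helper below renames old
# checkpoint keys accordingly when a v1 state_dict is loaded.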
def _v1_to_v2_weights(state_dict, prefix):
for i in range(4):
for type in ["weight", "bias"]:
old_key = f"{prefix}conv.{2*i}.{type}"
new_key = f"{prefix}conv.{i}.0.{type}"
if old_key in state_dict:
state_dict[new_key] = state_dict.pop(old_key)
def _default_anchorgen():
anchor_sizes = tuple((x, int(x * 2 ** (1.0 / 3)), int(x * 2 ** (2.0 / 3))) for x in [32, 64, 128, 256, 512])
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
return anchor_generator
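# With these defaults every feature-map location gets 3 scales x 3 aspect ratios = 9 anchors,
# and there is one size tuple per pyramid level (base sizes 32, 64, 128, 256 and 512).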
class RetinaNetHead(nn.Module):
"""
A regression and classification head for use in RetinaNet.
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
"""
def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[Callable[..., nn.Module]] = None):
super().__init__()
self.classification_head = RetinaNetClassificationHead(
in_channels, num_anchors, num_classes, norm_layer=norm_layer
)
self.regression_head = RetinaNetRegressionHead(in_channels, num_anchors, norm_layer=norm_layer)
def compute_loss(self, targets, head_outputs, anchors, matched_idxs):
# type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Dict[str, Tensor]
return {
"classification": self.classification_head.compute_loss(targets, head_outputs, matched_idxs),
"bbox_regression": self.regression_head.compute_loss(targets, head_outputs, anchors, matched_idxs),
}
def forward(self, x):
# type: (List[Tensor]) -> Dict[str, Tensor]
return {"cls_logits": self.classification_head(x), "bbox_regression": self.regression_head(x)}
class RetinaNetClassificationHead(nn.Module):
"""
A classification head for use in RetinaNet.
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
"""
_version = 2
def __init__(
self,
in_channels,
num_anchors,
num_classes,
prior_probability=0.01,
norm_layer: Optional[Callable[..., nn.Module]] = None,
):
super().__init__()
conv = []
for _ in range(4):
conv.append(misc_nn_ops.Conv2dNormActivation(in_channels, in_channels, norm_layer=norm_layer))
self.conv = nn.Sequential(*conv)
for layer in self.conv.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, std=0.01)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0)
self.cls_logits = nn.Conv2d(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
torch.nn.init.constant_(self.cls_logits.bias, -math.log((1 - prior_probability) / prior_probability))
self.num_classes = num_classes
self.num_anchors = num_anchors
# This is to fix using det_utils.Matcher.BETWEEN_THRESHOLDS in TorchScript.
# TorchScript doesn't support class attributes.
# https://github.com/pytorch/vision/pull/1697#issuecomment-630255584
self.BETWEEN_THRESHOLDS = det_utils.Matcher.BETWEEN_THRESHOLDS
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
_v1_to_v2_weights(state_dict, prefix)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def compute_loss(self, targets, head_outputs, matched_idxs):
# type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Tensor
losses = []
cls_logits = head_outputs["cls_logits"]
for targets_per_image, cls_logits_per_image, matched_idxs_per_image in zip(targets, cls_logits, matched_idxs):
# determine only the foreground
foreground_idxs_per_image = matched_idxs_per_image >= 0
num_foreground = foreground_idxs_per_image.sum()
# create the target classification
gt_classes_target = torch.zeros_like(cls_logits_per_image)
gt_classes_target[
foreground_idxs_per_image,
targets_per_image["labels"][matched_idxs_per_image[foreground_idxs_per_image]],
] = 1.0
# find indices for which anchors should be ignored
valid_idxs_per_image = matched_idxs_per_image != self.BETWEEN_THRESHOLDS
# compute the classification loss
losses.append(
sigmoid_focal_loss(
cls_logits_per_image[valid_idxs_per_image],
gt_classes_target[valid_idxs_per_image],
reduction="sum",
)
/ max(1, num_foreground)
)
return _sum(losses) / len(targets)
def forward(self, x):
# type: (List[Tensor]) -> Tensor
all_cls_logits = []
for features in x:
cls_logits = self.conv(features)
cls_logits = self.cls_logits(cls_logits)
# Permute classification output from (N, A * K, H, W) to (N, HWA, K).
N, _, H, W = cls_logits.shape
cls_logits = cls_logits.view(N, -1, self.num_classes, H, W)
cls_logits = cls_logits.permute(0, 3, 4, 1, 2)
            cls_logits = cls_logits.reshape(N, -1, self.num_classes)  # Size=(N, HWA, K)
all_cls_logits.append(cls_logits)
return torch.cat(all_cls_logits, dim=1)
class RetinaNetRegressionHead(nn.Module):
"""
A regression head for use in RetinaNet.
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
"""
_version = 2
__annotations__ = {
"box_coder": det_utils.BoxCoder,
}
def __init__(self, in_channels, num_anchors, norm_layer: Optional[Callable[..., nn.Module]] = None):
super().__init__()
conv = []
for _ in range(4):
conv.append(misc_nn_ops.Conv2dNormActivation(in_channels, in_channels, norm_layer=norm_layer))
self.conv = nn.Sequential(*conv)
self.bbox_reg = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)
torch.nn.init.normal_(self.bbox_reg.weight, std=0.01)
torch.nn.init.zeros_(self.bbox_reg.bias)
for layer in self.conv.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, std=0.01)
if layer.bias is not None:
torch.nn.init.zeros_(layer.bias)
self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
self._loss_type = "l1"
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
_v1_to_v2_weights(state_dict, prefix)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def compute_loss(self, targets, head_outputs, anchors, matched_idxs):
# type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Tensor
losses = []
bbox_regression = head_outputs["bbox_regression"]
for targets_per_image, bbox_regression_per_image, anchors_per_image, matched_idxs_per_image in zip(
targets, bbox_regression, anchors, matched_idxs
):
# determine only the foreground indices, ignore the rest
foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0]
num_foreground = foreground_idxs_per_image.numel()
# select only the foreground boxes
matched_gt_boxes_per_image = targets_per_image["boxes"][matched_idxs_per_image[foreground_idxs_per_image]]
bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :]
anchors_per_image = anchors_per_image[foreground_idxs_per_image, :]
# compute the loss
losses.append(
_box_loss(
self._loss_type,
self.box_coder,
anchors_per_image,
matched_gt_boxes_per_image,
bbox_regression_per_image,
)
/ max(1, num_foreground)
)
return _sum(losses) / max(1, len(targets))
def forward(self, x):
# type: (List[Tensor]) -> Tensor
all_bbox_regression = []
for features in x:
bbox_regression = self.conv(features)
bbox_regression = self.bbox_reg(bbox_regression)
# Permute bbox regression output from (N, 4 * A, H, W) to (N, HWA, 4).
N, _, H, W = bbox_regression.shape
bbox_regression = bbox_regression.view(N, -1, 4, H, W)
bbox_regression = bbox_regression.permute(0, 3, 4, 1, 2)
bbox_regression = bbox_regression.reshape(N, -1, 4) # Size=(N, HWA, 4)
all_bbox_regression.append(bbox_regression)
return torch.cat(all_bbox_regression, dim=1)
class RetinaNet(nn.Module):
"""
Implements RetinaNet.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
min_size (int): Images are rescaled before feeding them to the backbone:
we attempt to preserve the aspect ratio and scale the shorter edge
to ``min_size``. If the resulting longer edge exceeds ``max_size``,
then downscale so that the longer edge does not exceed ``max_size``.
            This may result in the shorter edge being lower than ``min_size``.
max_size (int): See ``min_size``.
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
head (nn.Module): Module run on top of the feature pyramid.
Defaults to a module containing a classification and regression module.
score_thresh (float): Score threshold used for postprocessing the detections.
nms_thresh (float): NMS threshold used for postprocessing the detections.
detections_per_img (int): Number of best detections to keep after NMS.
fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training.
bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training.
topk_candidates (int): Number of best detections to keep before NMS.
Example:
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import RetinaNet
>>> from torchvision.models.detection.anchor_utils import AnchorGenerator
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
>>> # RetinaNet needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280,
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the network generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(
>>> sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),)
>>> )
>>>
>>> # put the pieces together inside a RetinaNet model
>>> model = RetinaNet(backbone,
>>> num_classes=2,
>>> anchor_generator=anchor_generator)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
__annotations__ = {
"box_coder": det_utils.BoxCoder,
"proposal_matcher": det_utils.Matcher,
}
def __init__(
self,
backbone,
num_classes,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# Anchor parameters
anchor_generator=None,
head=None,
proposal_matcher=None,
score_thresh=0.05,
nms_thresh=0.5,
detections_per_img=300,
fg_iou_thresh=0.5,
bg_iou_thresh=0.4,
topk_candidates=1000,
**kwargs,
):
super().__init__()
_log_api_usage_once(self)
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
self.backbone = backbone
if not isinstance(anchor_generator, (AnchorGenerator, type(None))):
raise TypeError(
f"anchor_generator should be of type AnchorGenerator or None instead of {type(anchor_generator)}"
)
if anchor_generator is None:
anchor_generator = _default_anchorgen()
self.anchor_generator = anchor_generator
if head is None:
head = RetinaNetHead(backbone.out_channels, anchor_generator.num_anchors_per_location()[0], num_classes)
self.head = head
if proposal_matcher is None:
proposal_matcher = det_utils.Matcher(
fg_iou_thresh,
bg_iou_thresh,
allow_low_quality_matches=True,
)
self.proposal_matcher = proposal_matcher
self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std, **kwargs)
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.detections_per_img = detections_per_img
self.topk_candidates = topk_candidates
# used only on torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(self, losses, detections):
# type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
if self.training:
return losses
return detections
def compute_loss(self, targets, head_outputs, anchors):
# type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Dict[str, Tensor]
matched_idxs = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
if targets_per_image["boxes"].numel() == 0:
matched_idxs.append(
torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)
)
continue
match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image)
matched_idxs.append(self.proposal_matcher(match_quality_matrix))
return self.head.compute_loss(targets, head_outputs, anchors, matched_idxs)
def postprocess_detections(self, head_outputs, anchors, image_shapes):
# type: (Dict[str, List[Tensor]], List[List[Tensor]], List[Tuple[int, int]]) -> List[Dict[str, Tensor]]
class_logits = head_outputs["cls_logits"]
box_regression = head_outputs["bbox_regression"]
num_images = len(image_shapes)
detections: List[Dict[str, Tensor]] = []
for index in range(num_images):
box_regression_per_image = [br[index] for br in box_regression]
logits_per_image = [cl[index] for cl in class_logits]
anchors_per_image, image_shape = anchors[index], image_shapes[index]
image_boxes = []
image_scores = []
image_labels = []
for box_regression_per_level, logits_per_level, anchors_per_level in zip(
box_regression_per_image, logits_per_image, anchors_per_image
):
num_classes = logits_per_level.shape[-1]
# remove low scoring boxes
scores_per_level = torch.sigmoid(logits_per_level).flatten()
keep_idxs = scores_per_level > self.score_thresh
scores_per_level = scores_per_level[keep_idxs]
topk_idxs = torch.where(keep_idxs)[0]
# keep only topk scoring predictions
num_topk = det_utils._topk_min(topk_idxs, self.topk_candidates, 0)
scores_per_level, idxs = scores_per_level.topk(num_topk)
topk_idxs = topk_idxs[idxs]
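                # scores_per_level was flattened from an [H * W * A, K] matrix, so each kept
                # index encodes both an anchor and a class: anchor = idx // K, label = idx % K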
anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode="floor")
labels_per_level = topk_idxs % num_classes
boxes_per_level = self.box_coder.decode_single(
box_regression_per_level[anchor_idxs], anchors_per_level[anchor_idxs]
)
boxes_per_level = box_ops.clip_boxes_to_image(boxes_per_level, image_shape)
image_boxes.append(boxes_per_level)
image_scores.append(scores_per_level)
image_labels.append(labels_per_level)
image_boxes = torch.cat(image_boxes, dim=0)
image_scores = torch.cat(image_scores, dim=0)
image_labels = torch.cat(image_labels, dim=0)
# non-maximum suppression
keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh)
keep = keep[: self.detections_per_img]
detections.append(
{
"boxes": image_boxes[keep],
"scores": image_scores[keep],
"labels": image_labels[keep],
}
)
return detections
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] which contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
"Expected target boxes to be a tensor of shape [N, 4].",
)
# get the original image sizes
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
# transform the input
images, targets = self.transform(images, targets)
# Check for degenerate boxes
# TODO: Move this to a function
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
torch._assert(
False,
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}.",
)
# get the features from the backbone
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
# TODO: Do we want a list or a dict?
features = list(features.values())
# compute the retinanet heads outputs using the features
head_outputs = self.head(features)
# create the set of anchors
anchors = self.anchor_generator(images, features)
losses = {}
detections: List[Dict[str, Tensor]] = []
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
# compute the losses
losses = self.compute_loss(targets, head_outputs, anchors)
else:
# recover level sizes
num_anchors_per_level = [x.size(2) * x.size(3) for x in features]
HW = 0
for v in num_anchors_per_level:
HW += v
HWA = head_outputs["cls_logits"].size(1)
A = HWA // HW
num_anchors_per_level = [hw * A for hw in num_anchors_per_level]
# split outputs per level
split_head_outputs: Dict[str, List[Tensor]] = {}
for k in head_outputs:
split_head_outputs[k] = list(head_outputs[k].split(num_anchors_per_level, dim=1))
split_anchors = [list(a.split(num_anchors_per_level)) for a in anchors]
# compute the detections
detections = self.postprocess_detections(split_head_outputs, split_anchors, images.image_sizes)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("RetinaNet always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
return self.eager_outputs(losses, detections)
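# Training-mode sketch (hypothetical data, reusing `x` and `model` from the class docstring
# example above): in train() mode forward() expects targets and returns the loss dict
# instead of detections, e.g.
#
#     >>> model.train()
#     >>> targets = [{"boxes": torch.tensor([[10.0, 20.0, 200.0, 250.0]]),
#     >>>             "labels": torch.tensor([1])} for _ in x]
#     >>> losses = model(x, targets)   # {"classification": ..., "bbox_regression": ...}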
_COMMON_META = {
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
}
class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/retinanet_resnet50_fpn_coco-eeacb38b.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 34014999,
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#retinanet",
"_metrics": {
"COCO-val2017": {
"box_map": 36.4,
}
},
"_ops": 151.54,
"_file_size": 130.267,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/retinanet_resnet50_fpn_v2_coco-5905b1c5.pth",
transforms=ObjectDetection,
meta={
**_COMMON_META,
"num_params": 38198935,
"recipe": "https://github.com/pytorch/vision/pull/5756",
"_metrics": {
"COCO-val2017": {
"box_map": 41.5,
}
},
"_ops": 152.238,
"_file_size": 146.037,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", RetinaNet_ResNet50_FPN_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def retinanet_resnet50_fpn(
*,
weights: Optional[RetinaNet_ResNet50_FPN_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> RetinaNet:
"""
Constructs a RetinaNet model with a ResNet-50-FPN backbone.
.. betastatus:: detection module
Reference: `Focal Loss for Dense Object Detection <https://arxiv.org/abs/1708.02002>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
Example::
>>> model = torchvision.models.detection.retinanet_resnet50_fpn(weights=RetinaNet_ResNet50_FPN_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for
the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.RetinaNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights
:members:
"""
weights = RetinaNet_ResNet50_FPN_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
# skip P2 because it generates too many anchors (according to their paper)
backbone = _resnet_fpn_extractor(
backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(256, 256)
)
model = RetinaNet(backbone, num_classes, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if weights == RetinaNet_ResNet50_FPN_Weights.COCO_V1:
overwrite_eps(model, 0.0)
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", RetinaNet_ResNet50_FPN_V2_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def retinanet_resnet50_fpn_v2(
*,
weights: Optional[RetinaNet_ResNet50_FPN_V2_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[ResNet50_Weights] = None,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> RetinaNet:
"""
Constructs an improved RetinaNet model with a ResNet-50-FPN backbone.
.. betastatus:: detection module
Reference: `Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection
<https://arxiv.org/abs/1912.02424>`_.
    See :func:`~torchvision.models.detection.retinanet_resnet50_fpn` for more details.
Args:
weights (:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for
the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
**kwargs: parameters passed to the ``torchvision.models.detection.RetinaNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights
:members:
"""
weights = RetinaNet_ResNet50_FPN_V2_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
is_trained = weights is not None or weights_backbone is not None
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
backbone = resnet50(weights=weights_backbone, progress=progress)
backbone = _resnet_fpn_extractor(
backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(2048, 256)
)
anchor_generator = _default_anchorgen()
head = RetinaNetHead(
backbone.out_channels,
anchor_generator.num_anchors_per_location()[0],
num_classes,
norm_layer=partial(nn.GroupNorm, 32),
)
head.regression_head._loss_type = "giou"
model = RetinaNet(backbone, num_classes, anchor_generator=anchor_generator, head=head, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
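The ``retinanet_resnet50_fpn_v2`` builder above documents its arguments but, unlike the v1 builder, carries no usage example. Below is a minimal inference sketch, assuming a torchvision build that exposes the v2 builder and its weights enum (downloading the COCO weights requires network access):
```py
# Minimal inference sketch (assumption: torchvision >= 0.13; the weight file is
# fetched on first use).
import torch
from torchvision.models.detection import (
    RetinaNet_ResNet50_FPN_V2_Weights,
    retinanet_resnet50_fpn_v2,
)

weights = RetinaNet_ResNet50_FPN_V2_Weights.DEFAULT
model = retinanet_resnet50_fpn_v2(weights=weights).eval()  # eval mode -> detections
images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with torch.no_grad():
    predictions = model(images)
# predictions[0] is a dict with "boxes" (FloatTensor[N, 4]), "scores" (Tensor[N])
# and "labels" (Int64Tensor[N]) for the first image.
```
In training mode the same model instead expects ``(images, targets)`` and returns the loss dictionary described in the v1 docstring.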
|
=================================================================================================================================
SOURCE CODE FILE: roi_heads.py
LINES: 1
SIZE: 33.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\roi_heads.py
ENCODING: utf-8
```py
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
import torchvision
from torch import nn, Tensor
from torchvision.ops import boxes as box_ops, roi_align
from . import _utils as det_utils
def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
# type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
"""
Computes the loss for Faster R-CNN.
Args:
class_logits (Tensor)
box_regression (Tensor)
        labels (list[Tensor])
        regression_targets (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
classification_loss = F.cross_entropy(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.where(labels > 0)[0]
labels_pos = labels[sampled_pos_inds_subset]
N, num_classes = class_logits.shape
box_regression = box_regression.reshape(N, box_regression.size(-1) // 4, 4)
box_loss = F.smooth_l1_loss(
box_regression[sampled_pos_inds_subset, labels_pos],
regression_targets[sampled_pos_inds_subset],
beta=1 / 9,
reduction="sum",
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
def maskrcnn_inference(x, labels):
# type: (Tensor, List[Tensor]) -> List[Tensor]
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
    by the CNN) and return one mask probability tensor per image.
    Args:
        x (Tensor): the mask logits
        labels (list[Tensor]): the predicted labels, one tensor per image
    Returns:
        results (list[Tensor]): one tensor of mask probabilities per image,
            of shape [num_detections, 1, H, W]
"""
mask_prob = x.sigmoid()
# select masks corresponding to the predicted classes
num_masks = x.shape[0]
boxes_per_image = [label.shape[0] for label in labels]
labels = torch.cat(labels)
index = torch.arange(num_masks, device=labels.device)
mask_prob = mask_prob[index, labels][:, None]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
return mask_prob
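# Shape note (comment added for clarity, not in the upstream file): with 28x28 mask
# logits, C classes and two images holding 3 and 2 detections,
#   x:      Tensor[5, C, 28, 28]
#   labels: [Tensor[3], Tensor[2]]
# maskrcnn_inference selects each detection's predicted-class channel and returns
# [Tensor[3, 1, 28, 28], Tensor[2, 1, 28, 28]] of per-pixel mask probabilities.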
def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
# type: (Tensor, Tensor, Tensor, int) -> Tensor
"""
Given segmentation masks and the bounding boxes corresponding
to the location of the masks in the image, this function
crops and resizes the masks in the position defined by the
boxes. This prepares the masks for them to be fed to the
loss computation as the targets.
"""
matched_idxs = matched_idxs.to(boxes)
rois = torch.cat([matched_idxs[:, None], boxes], dim=1)
gt_masks = gt_masks[:, None].to(rois)
return roi_align(gt_masks, rois, (M, M), 1.0)[:, 0]
def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
# type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
"""
Args:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
Return:
mask_loss (Tensor): scalar tensor containing the loss
"""
discretization_size = mask_logits.shape[-1]
labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
mask_targets = [
project_masks_on_boxes(m, p, i, discretization_size) for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
]
labels = torch.cat(labels, dim=0)
mask_targets = torch.cat(mask_targets, dim=0)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0:
return mask_logits.sum() * 0
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits[torch.arange(labels.shape[0], device=labels.device), labels], mask_targets
)
return mask_loss
def keypoints_to_heatmap(keypoints, rois, heatmap_size):
# type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor]
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
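# Worked example (comment added for clarity, not in the upstream file): with
# heatmap_size = 56, a visible keypoint whose discretized position inside its RoI
# is (x=10, y=3) gets the flattened target index 3 * 56 + 10 = 178; keypoints that
# are not visible or fall outside the RoI get valid = 0 and therefore target 0.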
def _onnx_heatmaps_to_keypoints(
maps, maps_i, roi_map_width, roi_map_height, widths_i, heights_i, offset_x_i, offset_y_i
):
num_keypoints = torch.scalar_tensor(maps.size(1), dtype=torch.int64)
width_correction = widths_i / roi_map_width
height_correction = heights_i / roi_map_height
roi_map = F.interpolate(
maps_i[:, None], size=(int(roi_map_height), int(roi_map_width)), mode="bicubic", align_corners=False
)[:, 0]
w = torch.scalar_tensor(roi_map.size(2), dtype=torch.int64)
pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)
x_int = pos % w
y_int = (pos - x_int) // w
x = (torch.tensor(0.5, dtype=torch.float32) + x_int.to(dtype=torch.float32)) * width_correction.to(
dtype=torch.float32
)
y = (torch.tensor(0.5, dtype=torch.float32) + y_int.to(dtype=torch.float32)) * height_correction.to(
dtype=torch.float32
)
xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32)
xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32)
xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32)
xy_preds_i = torch.stack(
[
xy_preds_i_0.to(dtype=torch.float32),
xy_preds_i_1.to(dtype=torch.float32),
xy_preds_i_2.to(dtype=torch.float32),
],
0,
)
# TODO: simplify when indexing without rank will be supported by ONNX
base = num_keypoints * num_keypoints + num_keypoints + 1
ind = torch.arange(num_keypoints)
ind = ind.to(dtype=torch.int64) * base
end_scores_i = (
roi_map.index_select(1, y_int.to(dtype=torch.int64))
.index_select(2, x_int.to(dtype=torch.int64))
.view(-1)
.index_select(0, ind.to(dtype=torch.int64))
)
return xy_preds_i, end_scores_i
@torch.jit._script_if_tracing
def _onnx_heatmaps_to_keypoints_loop(
maps, rois, widths_ceil, heights_ceil, widths, heights, offset_x, offset_y, num_keypoints
):
xy_preds = torch.zeros((0, 3, int(num_keypoints)), dtype=torch.float32, device=maps.device)
end_scores = torch.zeros((0, int(num_keypoints)), dtype=torch.float32, device=maps.device)
for i in range(int(rois.size(0))):
xy_preds_i, end_scores_i = _onnx_heatmaps_to_keypoints(
maps, maps[i], widths_ceil[i], heights_ceil[i], widths[i], heights[i], offset_x[i], offset_y[i]
)
xy_preds = torch.cat((xy_preds.to(dtype=torch.float32), xy_preds_i.unsqueeze(0).to(dtype=torch.float32)), 0)
end_scores = torch.cat(
(end_scores.to(dtype=torch.float32), end_scores_i.to(dtype=torch.float32).unsqueeze(0)), 0
)
return xy_preds, end_scores
def heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = widths.clamp(min=1)
heights = heights.clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_keypoints = maps.shape[1]
if torchvision._is_tracing():
xy_preds, end_scores = _onnx_heatmaps_to_keypoints_loop(
maps,
rois,
widths_ceil,
heights_ceil,
widths,
heights,
offset_x,
offset_y,
torch.scalar_tensor(num_keypoints, dtype=torch.int64),
)
return xy_preds.permute(0, 2, 1), end_scores
xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device)
end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device)
for i in range(len(rois)):
roi_map_width = int(widths_ceil[i].item())
roi_map_height = int(heights_ceil[i].item())
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = F.interpolate(
maps[i][:, None], size=(roi_map_height, roi_map_width), mode="bicubic", align_corners=False
)[:, 0]
# roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)
x_int = pos % w
y_int = torch.div(pos - x_int, w, rounding_mode="floor")
# assert (roi_map_probs[k, y_int, x_int] ==
# roi_map_probs[k, :, :].max())
x = (x_int.float() + 0.5) * width_correction
y = (y_int.float() + 0.5) * height_correction
xy_preds[i, 0, :] = x + offset_x[i]
xy_preds[i, 1, :] = y + offset_y[i]
xy_preds[i, 2, :] = 1
end_scores[i, :] = roi_map[torch.arange(num_keypoints, device=roi_map.device), y_int, x_int]
return xy_preds.permute(0, 2, 1), end_scores
def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs):
# type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor
N, K, H, W = keypoint_logits.shape
if H != W:
raise ValueError(
f"keypoint_logits height and width (last two elements of shape) should be equal. Instead got H = {H} and W = {W}"
)
discretization_size = H
heatmaps = []
valid = []
for proposals_per_image, gt_kp_in_image, midx in zip(proposals, gt_keypoints, keypoint_matched_idxs):
kp = gt_kp_in_image[midx]
heatmaps_per_image, valid_per_image = keypoints_to_heatmap(kp, proposals_per_image, discretization_size)
heatmaps.append(heatmaps_per_image.view(-1))
valid.append(valid_per_image.view(-1))
keypoint_targets = torch.cat(heatmaps, dim=0)
valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)
valid = torch.where(valid)[0]
# torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
if keypoint_targets.numel() == 0 or len(valid) == 0:
return keypoint_logits.sum() * 0
keypoint_logits = keypoint_logits.view(N * K, H * W)
keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
return keypoint_loss
def keypointrcnn_inference(x, boxes):
# type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
kp_probs = []
kp_scores = []
boxes_per_image = [box.size(0) for box in boxes]
x2 = x.split(boxes_per_image, dim=0)
for xx, bb in zip(x2, boxes):
kp_prob, scores = heatmaps_to_keypoints(xx, bb)
kp_probs.append(kp_prob)
kp_scores.append(scores)
return kp_probs, kp_scores
def _onnx_expand_boxes(boxes, scale):
# type: (Tensor, float) -> Tensor
w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
w_half = w_half.to(dtype=torch.float32) * scale
h_half = h_half.to(dtype=torch.float32) * scale
boxes_exp0 = x_c - w_half
boxes_exp1 = y_c - h_half
boxes_exp2 = x_c + w_half
boxes_exp3 = y_c + h_half
boxes_exp = torch.stack((boxes_exp0, boxes_exp1, boxes_exp2, boxes_exp3), 1)
return boxes_exp
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
# type: (Tensor, float) -> Tensor
if torchvision._is_tracing():
return _onnx_expand_boxes(boxes, scale)
w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
w_half *= scale
h_half *= scale
boxes_exp = torch.zeros_like(boxes)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
@torch.jit.unused
def expand_masks_tracing_scale(M, padding):
# type: (int, int) -> float
return torch.tensor(M + 2 * padding).to(torch.float32) / torch.tensor(M).to(torch.float32)
def expand_masks(mask, padding):
# type: (Tensor, int) -> Tuple[Tensor, float]
M = mask.shape[-1]
if torch._C._get_tracing_state(): # could not import is_tracing(), not sure why
scale = expand_masks_tracing_scale(M, padding)
else:
scale = float(M + 2 * padding) / M
padded_mask = F.pad(mask, (padding,) * 4)
return padded_mask, scale
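# Worked example (comment added for clarity, not in the upstream file): for M = 28
# and padding = 1 the padded mask is 30x30 and scale = (28 + 2) / 28 ≈ 1.0714;
# paste_masks_in_image below grows the boxes by the same ratio via expand_boxes so
# that the padded mask and the expanded box stay aligned.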
def paste_mask_in_image(mask, box, im_h, im_w):
# type: (Tensor, Tensor, int, int) -> Tensor
TO_REMOVE = 1
w = int(box[2] - box[0] + TO_REMOVE)
h = int(box[3] - box[1] + TO_REMOVE)
w = max(w, 1)
h = max(h, 1)
# Set shape to [batchxCxHxW]
mask = mask.expand((1, 1, -1, -1))
# Resize mask
mask = F.interpolate(mask, size=(h, w), mode="bilinear", align_corners=False)
mask = mask[0][0]
im_mask = torch.zeros((im_h, im_w), dtype=mask.dtype, device=mask.device)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, im_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])]
return im_mask
def _onnx_paste_mask_in_image(mask, box, im_h, im_w):
one = torch.ones(1, dtype=torch.int64)
zero = torch.zeros(1, dtype=torch.int64)
w = box[2] - box[0] + one
h = box[3] - box[1] + one
w = torch.max(torch.cat((w, one)))
h = torch.max(torch.cat((h, one)))
# Set shape to [batchxCxHxW]
mask = mask.expand((1, 1, mask.size(0), mask.size(1)))
# Resize mask
mask = F.interpolate(mask, size=(int(h), int(w)), mode="bilinear", align_corners=False)
mask = mask[0][0]
x_0 = torch.max(torch.cat((box[0].unsqueeze(0), zero)))
x_1 = torch.min(torch.cat((box[2].unsqueeze(0) + one, im_w.unsqueeze(0))))
y_0 = torch.max(torch.cat((box[1].unsqueeze(0), zero)))
y_1 = torch.min(torch.cat((box[3].unsqueeze(0) + one, im_h.unsqueeze(0))))
unpaded_im_mask = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])]
# TODO : replace below with a dynamic padding when support is added in ONNX
# pad y
zeros_y0 = torch.zeros(y_0, unpaded_im_mask.size(1))
zeros_y1 = torch.zeros(im_h - y_1, unpaded_im_mask.size(1))
concat_0 = torch.cat((zeros_y0, unpaded_im_mask.to(dtype=torch.float32), zeros_y1), 0)[0:im_h, :]
# pad x
zeros_x0 = torch.zeros(concat_0.size(0), x_0)
zeros_x1 = torch.zeros(concat_0.size(0), im_w - x_1)
im_mask = torch.cat((zeros_x0, concat_0, zeros_x1), 1)[:, :im_w]
return im_mask
@torch.jit._script_if_tracing
def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w):
res_append = torch.zeros(0, im_h, im_w)
for i in range(masks.size(0)):
mask_res = _onnx_paste_mask_in_image(masks[i][0], boxes[i], im_h, im_w)
mask_res = mask_res.unsqueeze(0)
res_append = torch.cat((res_append, mask_res))
return res_append
def paste_masks_in_image(masks, boxes, img_shape, padding=1):
# type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor
masks, scale = expand_masks(masks, padding=padding)
boxes = expand_boxes(boxes, scale).to(dtype=torch.int64)
im_h, im_w = img_shape
if torchvision._is_tracing():
return _onnx_paste_masks_in_image_loop(
masks, boxes, torch.scalar_tensor(im_h, dtype=torch.int64), torch.scalar_tensor(im_w, dtype=torch.int64)
)[:, None]
res = [paste_mask_in_image(m[0], b, im_h, im_w) for m, b in zip(masks, boxes)]
if len(res) > 0:
ret = torch.stack(res, dim=0)[:, None]
else:
ret = masks.new_empty((0, 1, im_h, im_w))
return ret
class RoIHeads(nn.Module):
__annotations__ = {
"box_coder": det_utils.BoxCoder,
"proposal_matcher": det_utils.Matcher,
"fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
}
def __init__(
self,
box_roi_pool,
box_head,
box_predictor,
# Faster R-CNN training
fg_iou_thresh,
bg_iou_thresh,
batch_size_per_image,
positive_fraction,
bbox_reg_weights,
# Faster R-CNN inference
score_thresh,
nms_thresh,
detections_per_img,
# Mask
mask_roi_pool=None,
mask_head=None,
mask_predictor=None,
keypoint_roi_pool=None,
keypoint_head=None,
keypoint_predictor=None,
):
super().__init__()
self.box_similarity = box_ops.box_iou
# assign ground-truth boxes for each proposal
self.proposal_matcher = det_utils.Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False)
self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)
if bbox_reg_weights is None:
bbox_reg_weights = (10.0, 10.0, 5.0, 5.0)
self.box_coder = det_utils.BoxCoder(bbox_reg_weights)
self.box_roi_pool = box_roi_pool
self.box_head = box_head
self.box_predictor = box_predictor
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.detections_per_img = detections_per_img
self.mask_roi_pool = mask_roi_pool
self.mask_head = mask_head
self.mask_predictor = mask_predictor
self.keypoint_roi_pool = keypoint_roi_pool
self.keypoint_head = keypoint_head
self.keypoint_predictor = keypoint_predictor
def has_mask(self):
if self.mask_roi_pool is None:
return False
if self.mask_head is None:
return False
if self.mask_predictor is None:
return False
return True
def has_keypoint(self):
if self.keypoint_roi_pool is None:
return False
if self.keypoint_head is None:
return False
if self.keypoint_predictor is None:
return False
return True
def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):
# type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
matched_idxs = []
labels = []
for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):
if gt_boxes_in_image.numel() == 0:
# Background image
device = proposals_in_image.device
clamped_matched_idxs_in_image = torch.zeros(
(proposals_in_image.shape[0],), dtype=torch.int64, device=device
)
labels_in_image = torch.zeros((proposals_in_image.shape[0],), dtype=torch.int64, device=device)
else:
# set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands
match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image)
matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)
clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)
labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]
labels_in_image = labels_in_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD
labels_in_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS
labels_in_image[ignore_inds] = -1 # -1 is ignored by sampler
matched_idxs.append(clamped_matched_idxs_in_image)
labels.append(labels_in_image)
return matched_idxs, labels
def subsample(self, labels):
# type: (List[Tensor]) -> List[Tensor]
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_inds = []
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)):
img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]
sampled_inds.append(img_sampled_inds)
return sampled_inds
def add_gt_proposals(self, proposals, gt_boxes):
# type: (List[Tensor], List[Tensor]) -> List[Tensor]
proposals = [torch.cat((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes)]
return proposals
def check_targets(self, targets):
# type: (Optional[List[Dict[str, Tensor]]]) -> None
if targets is None:
raise ValueError("targets should not be None")
if not all(["boxes" in t for t in targets]):
raise ValueError("Every element of targets should have a boxes key")
if not all(["labels" in t for t in targets]):
raise ValueError("Every element of targets should have a labels key")
if self.has_mask():
if not all(["masks" in t for t in targets]):
raise ValueError("Every element of targets should have a masks key")
def select_training_samples(
self,
proposals, # type: List[Tensor]
targets, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
self.check_targets(targets)
if targets is None:
raise ValueError("targets should not be None")
dtype = proposals[0].dtype
device = proposals[0].device
gt_boxes = [t["boxes"].to(dtype) for t in targets]
gt_labels = [t["labels"] for t in targets]
        # append ground-truth bboxes to proposals
proposals = self.add_gt_proposals(proposals, gt_boxes)
# get matching gt indices for each proposal
matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
# sample a fixed proportion of positive-negative proposals
sampled_inds = self.subsample(labels)
matched_gt_boxes = []
num_images = len(proposals)
for img_id in range(num_images):
img_sampled_inds = sampled_inds[img_id]
proposals[img_id] = proposals[img_id][img_sampled_inds]
labels[img_id] = labels[img_id][img_sampled_inds]
matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]
gt_boxes_in_image = gt_boxes[img_id]
if gt_boxes_in_image.numel() == 0:
gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)
matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])
regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)
return proposals, matched_idxs, labels, regression_targets
def postprocess_detections(
self,
class_logits, # type: Tensor
box_regression, # type: Tensor
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
device = class_logits.device
num_classes = class_logits.shape[-1]
boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
pred_boxes = self.box_coder.decode(box_regression, proposals)
pred_scores = F.softmax(class_logits, -1)
pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
pred_scores_list = pred_scores.split(boxes_per_image, 0)
all_boxes = []
all_scores = []
all_labels = []
for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes):
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
# create labels for each prediction
labels = torch.arange(num_classes, device=device)
labels = labels.view(1, -1).expand_as(scores)
# remove predictions with the background label
boxes = boxes[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# batch everything, by making every class prediction be a separate instance
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
# remove low scoring boxes
inds = torch.where(scores > self.score_thresh)[0]
boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
# remove empty boxes
keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
# non-maximum suppression, independently done per class
keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[: self.detections_per_img]
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
all_boxes.append(boxes)
all_scores.append(scores)
all_labels.append(labels)
return all_boxes, all_scores, all_labels
def forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
# TODO: https://github.com/pytorch/pytorch/issues/26731
floating_point_types = (torch.float, torch.double, torch.half)
if not t["boxes"].dtype in floating_point_types:
raise TypeError(f"target boxes must of float type, instead got {t['boxes'].dtype}")
if not t["labels"].dtype == torch.int64:
raise TypeError(f"target labels must of int64 type, instead got {t['labels'].dtype}")
if self.has_keypoint():
if not t["keypoints"].dtype == torch.float32:
raise TypeError(f"target keypoints must of float type, instead got {t['keypoints'].dtype}")
if self.training:
proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
box_features = self.box_roi_pool(features, proposals, image_shapes)
box_features = self.box_head(box_features)
class_logits, box_regression = self.box_predictor(box_features)
result: List[Dict[str, torch.Tensor]] = []
losses = {}
if self.training:
if labels is None:
raise ValueError("labels cannot be None")
if regression_targets is None:
raise ValueError("regression_targets cannot be None")
loss_classifier, loss_box_reg = fastrcnn_loss(class_logits, box_regression, labels, regression_targets)
losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg}
else:
boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
}
)
if self.has_mask():
mask_proposals = [p["boxes"] for p in result]
if self.training:
if matched_idxs is None:
raise ValueError("if in training, matched_idxs should not be None")
# during training, only focus on positive boxes
num_images = len(proposals)
mask_proposals = []
pos_matched_idxs = []
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
mask_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
if self.mask_roi_pool is not None:
mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)
mask_features = self.mask_head(mask_features)
mask_logits = self.mask_predictor(mask_features)
else:
raise Exception("Expected mask_roi_pool to be not None")
loss_mask = {}
if self.training:
if targets is None or pos_matched_idxs is None or mask_logits is None:
raise ValueError("targets, pos_matched_idxs, mask_logits cannot be None when training")
gt_masks = [t["masks"] for t in targets]
gt_labels = [t["labels"] for t in targets]
rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs)
loss_mask = {"loss_mask": rcnn_loss_mask}
else:
labels = [r["labels"] for r in result]
masks_probs = maskrcnn_inference(mask_logits, labels)
for mask_prob, r in zip(masks_probs, result):
r["masks"] = mask_prob
losses.update(loss_mask)
# keep none checks in if conditional so torchscript will conditionally
# compile each branch
if (
self.keypoint_roi_pool is not None
and self.keypoint_head is not None
and self.keypoint_predictor is not None
):
keypoint_proposals = [p["boxes"] for p in result]
if self.training:
# during training, only focus on positive boxes
num_images = len(proposals)
keypoint_proposals = []
pos_matched_idxs = []
if matched_idxs is None:
raise ValueError("if in trainning, matched_idxs should not be None")
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
keypoint_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)
keypoint_features = self.keypoint_head(keypoint_features)
keypoint_logits = self.keypoint_predictor(keypoint_features)
loss_keypoint = {}
if self.training:
if targets is None or pos_matched_idxs is None:
raise ValueError("both targets and pos_matched_idxs should not be None when in training mode")
gt_keypoints = [t["keypoints"] for t in targets]
rcnn_loss_keypoint = keypointrcnn_loss(
keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs
)
loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint}
else:
if keypoint_logits is None or keypoint_proposals is None:
raise ValueError(
"both keypoint_logits and keypoint_proposals should not be None when not in training mode"
)
keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)
for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):
r["keypoints"] = keypoint_prob
r["keypoints_scores"] = kps
losses.update(loss_keypoint)
return result, losses
```
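The mask-pasting helpers in ``roi_heads.py`` operate on plain tensors and can be exercised in isolation. A small sketch follows, assuming direct use of the internal ``torchvision.models.detection.roi_heads`` module (importable, but not a documented public API):
```py
# Illustrative sketch (not part of roi_heads.py): paste one 28x28 soft mask back
# into a 200x200 image canvas.
import torch
from torchvision.models.detection.roi_heads import paste_masks_in_image

masks = torch.rand(1, 1, 28, 28)                   # one mask probability map
boxes = torch.tensor([[40.0, 30.0, 120.0, 90.0]])  # its box in (x1, y1, x2, y2)
pasted = paste_masks_in_image(masks, boxes, (200, 200), padding=1)
print(pasted.shape)  # torch.Size([1, 1, 200, 200]); non-zero only around the (expanded) box
```
This is the step that turns fixed-size mask probabilities, such as those returned by ``maskrcnn_inference``, into full-resolution per-image masks.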
|
===========================================================================================================================
SOURCE CODE FILE: rpn.py
LINES: 1
SIZE: 15.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\rpn.py
ENCODING: utf-8
```py
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchvision.ops import boxes as box_ops, Conv2dNormActivation
from . import _utils as det_utils
# Import AnchorGenerator to keep compatibility.
from .anchor_utils import AnchorGenerator # noqa: 401
from .image_list import ImageList
class RPNHead(nn.Module):
"""
Adds a simple RPN Head with classification and regression heads
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
conv_depth (int, optional): number of convolutions
"""
_version = 2
def __init__(self, in_channels: int, num_anchors: int, conv_depth=1) -> None:
super().__init__()
convs = []
for _ in range(conv_depth):
convs.append(Conv2dNormActivation(in_channels, in_channels, kernel_size=3, norm_layer=None))
self.conv = nn.Sequential(*convs)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)
for layer in self.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, std=0.01) # type: ignore[arg-type]
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0) # type: ignore[arg-type]
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
for type in ["weight", "bias"]:
old_key = f"{prefix}conv.{type}"
new_key = f"{prefix}conv.0.0.{type}"
if old_key in state_dict:
state_dict[new_key] = state_dict.pop(old_key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
logits = []
bbox_reg = []
for feature in x:
t = self.conv(feature)
logits.append(self.cls_logits(t))
bbox_reg.append(self.bbox_pred(t))
return logits, bbox_reg
def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int, W: int) -> Tensor:
layer = layer.view(N, -1, C, H, W)
layer = layer.permute(0, 3, 4, 1, 2)
layer = layer.reshape(N, -1, C)
return layer
def concat_box_prediction_layers(box_cls: List[Tensor], box_regression: List[Tensor]) -> Tuple[Tensor, Tensor]:
box_cls_flattened = []
box_regression_flattened = []
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_regression
for box_cls_per_level, box_regression_per_level in zip(box_cls, box_regression):
N, AxC, H, W = box_cls_per_level.shape
Ax4 = box_regression_per_level.shape[1]
A = Ax4 // 4
C = AxC // A
box_cls_per_level = permute_and_flatten(box_cls_per_level, N, A, C, H, W)
box_cls_flattened.append(box_cls_per_level)
box_regression_per_level = permute_and_flatten(box_regression_per_level, N, A, 4, H, W)
box_regression_flattened.append(box_regression_per_level)
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
box_cls = torch.cat(box_cls_flattened, dim=1).flatten(0, -2)
box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4)
return box_cls, box_regression
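# Shape note (comment added for clarity, not in the upstream file): for one feature
# level with N = 2 images, A = 3 anchors per location, C = 1 objectness channel and
# an 8x8 map,
#   box_cls[0]:        Tensor[2, 3, 8, 8]  -> rows of the output Tensor[384, 1]
#   box_regression[0]: Tensor[2, 12, 8, 8] -> rows of the output Tensor[384, 4]
# with 384 = 2 * 8 * 8 * 3, i.e. one row per (image, location, anchor).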
class RegionProposalNetwork(torch.nn.Module):
"""
Implements Region Proposal Network (RPN).
Args:
anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
head (nn.Module): module that computes the objectness and regression deltas
fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
pre_nms_top_n (Dict[str, int]): number of proposals to keep before applying NMS. It should
contain two fields: training and testing, to allow for different values depending
on training or evaluation
post_nms_top_n (Dict[str, int]): number of proposals to keep after applying NMS. It should
contain two fields: training and testing, to allow for different values depending
on training or evaluation
nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
score_thresh (float): only return proposals with an objectness score greater than score_thresh
"""
__annotations__ = {
"box_coder": det_utils.BoxCoder,
"proposal_matcher": det_utils.Matcher,
"fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
}
def __init__(
self,
anchor_generator: AnchorGenerator,
head: nn.Module,
# Faster-RCNN Training
fg_iou_thresh: float,
bg_iou_thresh: float,
batch_size_per_image: int,
positive_fraction: float,
# Faster-RCNN Inference
pre_nms_top_n: Dict[str, int],
post_nms_top_n: Dict[str, int],
nms_thresh: float,
score_thresh: float = 0.0,
) -> None:
super().__init__()
self.anchor_generator = anchor_generator
self.head = head
self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
# used during training
self.box_similarity = box_ops.box_iou
self.proposal_matcher = det_utils.Matcher(
fg_iou_thresh,
bg_iou_thresh,
allow_low_quality_matches=True,
)
self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)
# used during testing
self._pre_nms_top_n = pre_nms_top_n
self._post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.score_thresh = score_thresh
self.min_size = 1e-3
def pre_nms_top_n(self) -> int:
if self.training:
return self._pre_nms_top_n["training"]
return self._pre_nms_top_n["testing"]
def post_nms_top_n(self) -> int:
if self.training:
return self._post_nms_top_n["training"]
return self._post_nms_top_n["testing"]
def assign_targets_to_anchors(
self, anchors: List[Tensor], targets: List[Dict[str, Tensor]]
) -> Tuple[List[Tensor], List[Tensor]]:
labels = []
matched_gt_boxes = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
gt_boxes = targets_per_image["boxes"]
if gt_boxes.numel() == 0:
# Background image (negative example)
device = anchors_per_image.device
matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device)
labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)
else:
match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]
labels_per_image = matched_idxs >= 0
labels_per_image = labels_per_image.to(dtype=torch.float32)
# Background (negative examples)
bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_indices] = 0.0
# discard indices that are between thresholds
inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS
labels_per_image[inds_to_discard] = -1.0
labels.append(labels_per_image)
matched_gt_boxes.append(matched_gt_boxes_per_image)
return labels, matched_gt_boxes
def _get_top_n_idx(self, objectness: Tensor, num_anchors_per_level: List[int]) -> Tensor:
r = []
offset = 0
for ob in objectness.split(num_anchors_per_level, 1):
num_anchors = ob.shape[1]
pre_nms_top_n = det_utils._topk_min(ob, self.pre_nms_top_n(), 1)
_, top_n_idx = ob.topk(pre_nms_top_n, dim=1)
r.append(top_n_idx + offset)
offset += num_anchors
return torch.cat(r, dim=1)
def filter_proposals(
self,
proposals: Tensor,
objectness: Tensor,
image_shapes: List[Tuple[int, int]],
num_anchors_per_level: List[int],
) -> Tuple[List[Tensor], List[Tensor]]:
num_images = proposals.shape[0]
device = proposals.device
# do not backprop through objectness
objectness = objectness.detach()
objectness = objectness.reshape(num_images, -1)
levels = [
torch.full((n,), idx, dtype=torch.int64, device=device) for idx, n in enumerate(num_anchors_per_level)
]
levels = torch.cat(levels, 0)
levels = levels.reshape(1, -1).expand_as(objectness)
# select top_n boxes independently per level before applying nms
top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
image_range = torch.arange(num_images, device=device)
batch_idx = image_range[:, None]
objectness = objectness[batch_idx, top_n_idx]
levels = levels[batch_idx, top_n_idx]
proposals = proposals[batch_idx, top_n_idx]
objectness_prob = torch.sigmoid(objectness)
final_boxes = []
final_scores = []
for boxes, scores, lvl, img_shape in zip(proposals, objectness_prob, levels, image_shapes):
boxes = box_ops.clip_boxes_to_image(boxes, img_shape)
# remove small boxes
keep = box_ops.remove_small_boxes(boxes, self.min_size)
boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
# remove low scoring boxes
# use >= for Backwards compatibility
keep = torch.where(scores >= self.score_thresh)[0]
boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
# non-maximum suppression, independently done per level
keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[: self.post_nms_top_n()]
boxes, scores = boxes[keep], scores[keep]
final_boxes.append(boxes)
final_scores.append(scores)
return final_boxes, final_scores
def compute_loss(
self, objectness: Tensor, pred_bbox_deltas: Tensor, labels: List[Tensor], regression_targets: List[Tensor]
) -> Tuple[Tensor, Tensor]:
"""
Args:
objectness (Tensor)
pred_bbox_deltas (Tensor)
labels (List[Tensor])
regression_targets (List[Tensor])
Returns:
objectness_loss (Tensor)
box_loss (Tensor)
"""
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
objectness = objectness.flatten()
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
box_loss = F.smooth_l1_loss(
pred_bbox_deltas[sampled_pos_inds],
regression_targets[sampled_pos_inds],
beta=1 / 9,
reduction="sum",
) / (sampled_inds.numel())
objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])
return objectness_loss, box_loss
def forward(
self,
images: ImageList,
features: Dict[str, Tensor],
targets: Optional[List[Dict[str, Tensor]]] = None,
) -> Tuple[List[Tensor], Dict[str, Tensor]]:
"""
Args:
images (ImageList): images for which we want to compute the predictions
features (Dict[str, Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the dict
                corresponds to a different feature level.
targets (List[Dict[str, Tensor]]): ground-truth boxes present in the image (optional).
If provided, each element in the dict should contain a field `boxes`,
with the locations of the ground-truth boxes.
Returns:
boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per
image.
losses (Dict[str, Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
# RPN uses all feature maps that are available
features = list(features.values())
objectness, pred_bbox_deltas = self.head(features)
anchors = self.anchor_generator(images, features)
num_images = len(anchors)
num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]
num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors]
objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness, pred_bbox_deltas)
# apply pred_bbox_deltas to anchors to obtain the decoded proposals
        # note that we detach the deltas because Faster R-CNN does not backprop through
# the proposals
proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
proposals = proposals.view(num_images, -1, 4)
boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)
losses = {}
if self.training:
if targets is None:
raise ValueError("targets should not be None")
labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
loss_objectness, loss_rpn_box_reg = self.compute_loss(
objectness, pred_bbox_deltas, labels, regression_targets
)
losses = {
"loss_objectness": loss_objectness,
"loss_rpn_box_reg": loss_rpn_box_reg,
}
return boxes, losses
```
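To see how ``AnchorGenerator``, ``RPNHead`` and ``RegionProposalNetwork`` fit together, here is a standalone eager-mode sketch on random feature maps; the feature shape and the hyper-parameters are illustrative assumptions rather than values the module prescribes:
```py
# Illustrative sketch (not part of rpn.py): run a standalone RPN on dummy features.
import torch
from torchvision.models.detection.image_list import ImageList
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork

anchor_generator = AnchorGenerator(sizes=((32, 64, 128),), aspect_ratios=((0.5, 1.0, 2.0),))
head = RPNHead(in_channels=256, num_anchors=anchor_generator.num_anchors_per_location()[0])
rpn = RegionProposalNetwork(
    anchor_generator,
    head,
    fg_iou_thresh=0.7,
    bg_iou_thresh=0.3,
    batch_size_per_image=256,
    positive_fraction=0.5,
    pre_nms_top_n={"training": 2000, "testing": 1000},
    post_nms_top_n={"training": 2000, "testing": 1000},
    nms_thresh=0.7,
).eval()

images = ImageList(torch.rand(2, 3, 224, 224), [(224, 224), (224, 224)])
features = {"0": torch.rand(2, 256, 28, 28)}  # a single 28x28 feature level
with torch.no_grad():
    boxes, losses = rpn(images, features)
print(len(boxes), boxes[0].shape, losses)  # 2, torch.Size([K, 4]) with K <= 1000, {}
```
In training mode the same call additionally needs ``targets`` with a ``boxes`` field and returns the ``loss_objectness`` and ``loss_rpn_box_reg`` entries computed by ``compute_loss``.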
|
===========================================================================================================================
SOURCE CODE FILE: ssd.py
LINES: 1
SIZE: 28.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\ssd.py
ENCODING: utf-8
```py
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from ...ops import boxes as box_ops
from ...transforms._presets import ObjectDetection
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..vgg import VGG, vgg16, VGG16_Weights
from . import _utils as det_utils
from .anchor_utils import DefaultBoxGenerator
from .backbone_utils import _validate_trainable_layers
from .transform import GeneralizedRCNNTransform
__all__ = [
"SSD300_VGG16_Weights",
"ssd300_vgg16",
]
class SSD300_VGG16_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/ssd300_vgg16_coco-b556d3b4.pth",
transforms=ObjectDetection,
meta={
"num_params": 35641826,
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssd300-vgg16",
"_metrics": {
"COCO-val2017": {
"box_map": 25.1,
}
},
"_ops": 34.858,
"_file_size": 135.988,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
def _xavier_init(conv: nn.Module):
for layer in conv.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.xavier_uniform_(layer.weight)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0.0)
class SSDHead(nn.Module):
def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):
super().__init__()
self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes)
self.regression_head = SSDRegressionHead(in_channels, num_anchors)
def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:
return {
"bbox_regression": self.regression_head(x),
"cls_logits": self.classification_head(x),
}
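# Shape note (comment added for clarity, not in the upstream file): with a batch of
# N images and HWA anchor positions summed over all feature maps, the head returns
#   {"bbox_regression": Tensor[N, HWA, 4], "cls_logits": Tensor[N, HWA, num_classes]}
# as produced by SSDScoringHead.forward below.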
class SSDScoringHead(nn.Module):
def __init__(self, module_list: nn.ModuleList, num_columns: int):
super().__init__()
self.module_list = module_list
self.num_columns = num_columns
def _get_result_from_module_list(self, x: Tensor, idx: int) -> Tensor:
"""
This is equivalent to self.module_list[idx](x),
but torchscript doesn't support this yet
"""
num_blocks = len(self.module_list)
if idx < 0:
idx += num_blocks
out = x
for i, module in enumerate(self.module_list):
if i == idx:
out = module(x)
return out
def forward(self, x: List[Tensor]) -> Tensor:
all_results = []
for i, features in enumerate(x):
results = self._get_result_from_module_list(features, i)
# Permute output from (N, A * K, H, W) to (N, HWA, K).
N, _, H, W = results.shape
results = results.view(N, -1, self.num_columns, H, W)
results = results.permute(0, 3, 4, 1, 2)
results = results.reshape(N, -1, self.num_columns) # Size=(N, HWA, K)
all_results.append(results)
return torch.cat(all_results, dim=1)
class SSDClassificationHead(SSDScoringHead):
def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):
cls_logits = nn.ModuleList()
for channels, anchors in zip(in_channels, num_anchors):
cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1))
_xavier_init(cls_logits)
super().__init__(cls_logits, num_classes)
class SSDRegressionHead(SSDScoringHead):
def __init__(self, in_channels: List[int], num_anchors: List[int]):
bbox_reg = nn.ModuleList()
for channels, anchors in zip(in_channels, num_anchors):
bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1))
_xavier_init(bbox_reg)
super().__init__(bbox_reg, 4)
class SSD(nn.Module):
"""
Implements SSD architecture from `"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes, but they will be resized
    to a fixed size before being passed to the backbone.
The behavior of the model changes depending on if it is in training or evaluation mode.
During training, the model expects both the input tensors and targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each detection
- scores (Tensor[N]): the scores for each detection
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute with the list of the output channels of
each feature map. The backbone should return a single Tensor or an OrderedDict[Tensor].
anchor_generator (DefaultBoxGenerator): module that generates the default boxes for a
set of feature maps.
size (Tuple[int, int]): the width and height to which images will be rescaled before feeding them
to the backbone.
num_classes (int): number of output classes of the model (including the background).
image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained.
image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
head (nn.Module, optional): Module run on top of the backbone features. Defaults to a module containing
a classification and regression module.
score_thresh (float): Score threshold used for postprocessing the detections.
nms_thresh (float): NMS threshold used for postprocessing the detections.
detections_per_img (int): Number of best detections to keep after NMS.
iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training.
topk_candidates (int): Number of best detections to keep before NMS.
positive_fraction (float): a number between 0 and 1 which indicates the proportion of positive
proposals used during the training of the classification head. It is used to estimate the negative to
positive ratio.
"""
__annotations__ = {
"box_coder": det_utils.BoxCoder,
"proposal_matcher": det_utils.Matcher,
}
def __init__(
self,
backbone: nn.Module,
anchor_generator: DefaultBoxGenerator,
size: Tuple[int, int],
num_classes: int,
image_mean: Optional[List[float]] = None,
image_std: Optional[List[float]] = None,
head: Optional[nn.Module] = None,
score_thresh: float = 0.01,
nms_thresh: float = 0.45,
detections_per_img: int = 200,
iou_thresh: float = 0.5,
topk_candidates: int = 400,
positive_fraction: float = 0.25,
**kwargs: Any,
):
super().__init__()
_log_api_usage_once(self)
self.backbone = backbone
self.anchor_generator = anchor_generator
self.box_coder = det_utils.BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
if head is None:
if hasattr(backbone, "out_channels"):
out_channels = backbone.out_channels
else:
out_channels = det_utils.retrieve_out_channels(backbone, size)
if len(out_channels) != len(anchor_generator.aspect_ratios):
raise ValueError(
f"The length of the output channels from the backbone ({len(out_channels)}) do not match the length of the anchor generator aspect ratios ({len(anchor_generator.aspect_ratios)})"
)
num_anchors = self.anchor_generator.num_anchors_per_location()
head = SSDHead(out_channels, num_anchors, num_classes)
self.head = head
self.proposal_matcher = det_utils.SSDMatcher(iou_thresh)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
self.transform = GeneralizedRCNNTransform(
min(size), max(size), image_mean, image_std, size_divisible=1, fixed_size=size, **kwargs
)
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.detections_per_img = detections_per_img
self.topk_candidates = topk_candidates
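        # e.g. the default positive_fraction=0.25 gives (1 - 0.25) / 0.25 = 3 negatives per positive (hard negative mining)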
self.neg_to_pos_ratio = (1.0 - positive_fraction) / positive_fraction
        # used only in torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(
self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]]
) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
if self.training:
return losses
return detections
def compute_loss(
self,
targets: List[Dict[str, Tensor]],
head_outputs: Dict[str, Tensor],
anchors: List[Tensor],
matched_idxs: List[Tensor],
) -> Dict[str, Tensor]:
bbox_regression = head_outputs["bbox_regression"]
cls_logits = head_outputs["cls_logits"]
# Match original targets with default boxes
num_foreground = 0
bbox_loss = []
cls_targets = []
for (
targets_per_image,
bbox_regression_per_image,
cls_logits_per_image,
anchors_per_image,
matched_idxs_per_image,
) in zip(targets, bbox_regression, cls_logits, anchors, matched_idxs):
# produce the matching between boxes and targets
foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0]
foreground_matched_idxs_per_image = matched_idxs_per_image[foreground_idxs_per_image]
num_foreground += foreground_matched_idxs_per_image.numel()
# Calculate regression loss
matched_gt_boxes_per_image = targets_per_image["boxes"][foreground_matched_idxs_per_image]
bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :]
anchors_per_image = anchors_per_image[foreground_idxs_per_image, :]
target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image)
bbox_loss.append(
torch.nn.functional.smooth_l1_loss(bbox_regression_per_image, target_regression, reduction="sum")
)
# Estimate ground truth for class targets
gt_classes_target = torch.zeros(
(cls_logits_per_image.size(0),),
dtype=targets_per_image["labels"].dtype,
device=targets_per_image["labels"].device,
)
gt_classes_target[foreground_idxs_per_image] = targets_per_image["labels"][
foreground_matched_idxs_per_image
]
cls_targets.append(gt_classes_target)
bbox_loss = torch.stack(bbox_loss)
cls_targets = torch.stack(cls_targets)
# Calculate classification loss
num_classes = cls_logits.size(-1)
cls_loss = F.cross_entropy(cls_logits.view(-1, num_classes), cls_targets.view(-1), reduction="none").view(
cls_targets.size()
)
# Hard Negative Sampling
foreground_idxs = cls_targets > 0
num_negative = self.neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True)
# num_negative[num_negative < self.neg_to_pos_ratio] = self.neg_to_pos_ratio
negative_loss = cls_loss.clone()
        negative_loss[foreground_idxs] = -float("inf")  # use -inf to detect positive values that crept into the sample
values, idx = negative_loss.sort(1, descending=True)
# background_idxs = torch.logical_and(idx.sort(1)[1] < num_negative, torch.isfinite(values))
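        # idx.sort(1)[1] recovers each anchor's rank in descending negative-loss order;
        # keeping ranks below num_negative selects the hardest negatives per image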
background_idxs = idx.sort(1)[1] < num_negative
N = max(1, num_foreground)
return {
"bbox_regression": bbox_loss.sum() / N,
"classification": (cls_loss[foreground_idxs].sum() + cls_loss[background_idxs].sum()) / N,
}
def forward(
self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None
) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
)
else:
torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
# get the original image sizes
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
# transform the input
images, targets = self.transform(images, targets)
# Check for degenerate boxes
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
torch._assert(
False,
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}.",
)
# get the features from the backbone
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
features = list(features.values())
# compute the ssd heads outputs using the features
head_outputs = self.head(features)
# create the set of anchors
anchors = self.anchor_generator(images, features)
losses = {}
detections: List[Dict[str, Tensor]] = []
if self.training:
matched_idxs = []
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for anchors_per_image, targets_per_image in zip(anchors, targets):
if targets_per_image["boxes"].numel() == 0:
matched_idxs.append(
torch.full(
(anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device
)
)
continue
match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image)
matched_idxs.append(self.proposal_matcher(match_quality_matrix))
losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs)
else:
detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("SSD always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
return self.eager_outputs(losses, detections)
def postprocess_detections(
self, head_outputs: Dict[str, Tensor], image_anchors: List[Tensor], image_shapes: List[Tuple[int, int]]
) -> List[Dict[str, Tensor]]:
bbox_regression = head_outputs["bbox_regression"]
pred_scores = F.softmax(head_outputs["cls_logits"], dim=-1)
num_classes = pred_scores.size(-1)
device = pred_scores.device
detections: List[Dict[str, Tensor]] = []
for boxes, scores, anchors, image_shape in zip(bbox_regression, pred_scores, image_anchors, image_shapes):
boxes = self.box_coder.decode_single(boxes, anchors)
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
image_boxes = []
image_scores = []
image_labels = []
for label in range(1, num_classes):
score = scores[:, label]
keep_idxs = score > self.score_thresh
score = score[keep_idxs]
box = boxes[keep_idxs]
# keep only topk scoring predictions
num_topk = det_utils._topk_min(score, self.topk_candidates, 0)
score, idxs = score.topk(num_topk)
box = box[idxs]
image_boxes.append(box)
image_scores.append(score)
image_labels.append(torch.full_like(score, fill_value=label, dtype=torch.int64, device=device))
image_boxes = torch.cat(image_boxes, dim=0)
image_scores = torch.cat(image_scores, dim=0)
image_labels = torch.cat(image_labels, dim=0)
# non-maximum suppression
keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh)
keep = keep[: self.detections_per_img]
detections.append(
{
"boxes": image_boxes[keep],
"scores": image_scores[keep],
"labels": image_labels[keep],
}
)
return detections
class SSDFeatureExtractorVGG(nn.Module):
def __init__(self, backbone: nn.Module, highres: bool):
super().__init__()
_, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d))
# Patch ceil_mode for maxpool3 to get the same WxH output sizes as the paper
backbone[maxpool3_pos].ceil_mode = True
# parameters used for L2 regularization + rescaling
self.scale_weight = nn.Parameter(torch.ones(512) * 20)
# Multiple Feature maps - page 4, Fig 2 of SSD paper
self.features = nn.Sequential(*backbone[:maxpool4_pos]) # until conv4_3
# SSD300 case - page 4, Fig 2 of SSD paper
extra = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2), # conv8_2
nn.ReLU(inplace=True),
),
nn.Sequential(
nn.Conv2d(512, 128, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2), # conv9_2
nn.ReLU(inplace=True),
),
nn.Sequential(
nn.Conv2d(256, 128, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 256, kernel_size=3), # conv10_2
nn.ReLU(inplace=True),
),
nn.Sequential(
nn.Conv2d(256, 128, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 256, kernel_size=3), # conv11_2
nn.ReLU(inplace=True),
),
]
)
if highres:
            # Additional layers for the SSD512 case. See page 11, footnote 5.
extra.append(
nn.Sequential(
nn.Conv2d(256, 128, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 256, kernel_size=4), # conv12_2
nn.ReLU(inplace=True),
)
)
_xavier_init(extra)
fc = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=False), # add modified maxpool5
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6), # FC6 with atrous
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1), # FC7
nn.ReLU(inplace=True),
)
_xavier_init(fc)
extra.insert(
0,
nn.Sequential(
*backbone[maxpool4_pos:-1], # until conv5_3, skip maxpool5
fc,
),
)
self.extra = extra
def forward(self, x: Tensor) -> Dict[str, Tensor]:
# L2 regularization + Rescaling of 1st block's feature map
x = self.features(x)
rescaled = self.scale_weight.view(1, -1, 1, 1) * F.normalize(x)
output = [rescaled]
# Calculating Feature maps for the rest blocks
for block in self.extra:
x = block(x)
output.append(x)
return OrderedDict([(str(i), v) for i, v in enumerate(output)])
def _vgg_extractor(backbone: VGG, highres: bool, trainable_layers: int):
backbone = backbone.features
# Gather the indices of maxpools. These are the locations of output blocks.
stage_indices = [0] + [i for i, b in enumerate(backbone) if isinstance(b, nn.MaxPool2d)][:-1]
num_stages = len(stage_indices)
# find the index of the layer from which we won't freeze
torch._assert(
0 <= trainable_layers <= num_stages,
f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}",
)
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
return SSDFeatureExtractorVGG(backbone, highres)
@register_model()
@handle_legacy_interface(
weights=("pretrained", SSD300_VGG16_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", VGG16_Weights.IMAGENET1K_FEATURES),
)
def ssd300_vgg16(
*,
weights: Optional[SSD300_VGG16_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[VGG16_Weights] = VGG16_Weights.IMAGENET1K_FEATURES,
trainable_backbone_layers: Optional[int] = None,
**kwargs: Any,
) -> SSD:
"""The SSD300 model is based on the `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ paper.
.. betastatus:: detection module
    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    image, and should be in 0-1 range. Different images can have different sizes, but they will be resized
    to a fixed size before being passed to the backbone.
    The behavior of the model changes depending on whether it is in training or evaluation mode.
    During training, the model expects both the input tensors and targets (list of dictionaries),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each detection
- scores (Tensor[N]): the scores for each detection
Example:
>>> model = torchvision.models.detection.ssd300_vgg16(weights=SSD300_VGG16_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.SSD300_VGG16_Weights`, optional): The pretrained
weights to use. See
:class:`~torchvision.models.detection.SSD300_VGG16_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr
Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
weights_backbone (:class:`~torchvision.models.VGG16_Weights`, optional): The pretrained weights for the
backbone
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 4.
**kwargs: parameters passed to the ``torchvision.models.detection.SSD``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/ssd.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.SSD300_VGG16_Weights
:members:
"""
weights = SSD300_VGG16_Weights.verify(weights)
weights_backbone = VGG16_Weights.verify(weights_backbone)
if "size" in kwargs:
warnings.warn("The size of the model is already fixed; ignoring the parameter.")
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
trainable_backbone_layers = _validate_trainable_layers(
weights is not None or weights_backbone is not None, trainable_backbone_layers, 5, 4
)
# Use custom backbones more appropriate for SSD
backbone = vgg16(weights=weights_backbone, progress=progress)
backbone = _vgg_extractor(backbone, False, trainable_backbone_layers)
anchor_generator = DefaultBoxGenerator(
[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
steps=[8, 16, 32, 64, 100, 300],
)
defaults = {
        # Rescale the input in a way compatible with the backbone
"image_mean": [0.48235, 0.45882, 0.40784],
"image_std": [1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0], # undo the 0-1 scaling of toTensor
}
kwargs: Any = {**defaults, **kwargs}
model = SSD(backbone, anchor_generator, (300, 300), num_classes, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
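A toy, self-contained sketch of the double-sort ranking that `SSD.compute_loss` above uses for hard negative mining; the loss values, the single positive anchor, and the 3:1 ratio are invented for illustration.
```py
import torch

# One image, six anchors; anchor 5 is the only positive match.
cls_loss = torch.tensor([[0.2, 1.5, 0.1, 0.9, 0.4, 2.0]])
foreground_idxs = torch.tensor([[False, False, False, False, False, True]])
neg_to_pos_ratio = 3.0  # the default positive_fraction=0.25 gives (1 - 0.25) / 0.25 = 3
num_negative = neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True)  # keep 3 negatives

negative_loss = cls_loss.clone()
negative_loss[foreground_idxs] = -float("inf")   # positives never count as hard negatives
_, idx = negative_loss.sort(1, descending=True)  # idx[:, r] = anchor with the r-th largest loss
rank = idx.sort(1)[1]                            # rank of every anchor by descending loss
background_idxs = rank < num_negative            # hardest 3 negatives: anchors 1, 3 and 4
print(background_idxs)  # tensor([[False,  True, False,  True,  True, False]])
```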
|
===============================================================================================================================
SOURCE CODE FILE: ssdlite.py
LINES: 1
SIZE: 13.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\ssdlite.py
ENCODING: utf-8
```py
import warnings
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch import nn, Tensor
from ...ops.misc import Conv2dNormActivation
from ...transforms._presets import ObjectDetection
from ...utils import _log_api_usage_once
from .. import mobilenet
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights
from . import _utils as det_utils
from .anchor_utils import DefaultBoxGenerator
from .backbone_utils import _validate_trainable_layers
from .ssd import SSD, SSDScoringHead
__all__ = [
"SSDLite320_MobileNet_V3_Large_Weights",
"ssdlite320_mobilenet_v3_large",
]
# Building blocks of SSDlite as described in section 6.2 of the MobileNetV2 paper
def _prediction_block(
in_channels: int, out_channels: int, kernel_size: int, norm_layer: Callable[..., nn.Module]
) -> nn.Sequential:
return nn.Sequential(
# 3x3 depthwise with stride 1 and padding 1
Conv2dNormActivation(
in_channels,
in_channels,
kernel_size=kernel_size,
groups=in_channels,
norm_layer=norm_layer,
activation_layer=nn.ReLU6,
),
        # 1x1 projection to output channels
nn.Conv2d(in_channels, out_channels, 1),
)
def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[..., nn.Module]) -> nn.Sequential:
activation = nn.ReLU6
intermediate_channels = out_channels // 2
return nn.Sequential(
# 1x1 projection to half output channels
Conv2dNormActivation(
in_channels, intermediate_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation
),
# 3x3 depthwise with stride 2 and padding 1
Conv2dNormActivation(
intermediate_channels,
intermediate_channels,
kernel_size=3,
stride=2,
groups=intermediate_channels,
norm_layer=norm_layer,
activation_layer=activation,
),
        # 1x1 projection to output channels
Conv2dNormActivation(
intermediate_channels, out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation
),
)
def _normal_init(conv: nn.Module):
for layer in conv.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0.0, std=0.03)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0.0)
class SSDLiteHead(nn.Module):
def __init__(
self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module]
):
super().__init__()
self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer)
self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer)
def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:
return {
"bbox_regression": self.regression_head(x),
"cls_logits": self.classification_head(x),
}
class SSDLiteClassificationHead(SSDScoringHead):
def __init__(
self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module]
):
cls_logits = nn.ModuleList()
for channels, anchors in zip(in_channels, num_anchors):
cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer))
_normal_init(cls_logits)
super().__init__(cls_logits, num_classes)
class SSDLiteRegressionHead(SSDScoringHead):
def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]):
bbox_reg = nn.ModuleList()
for channels, anchors in zip(in_channels, num_anchors):
bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer))
_normal_init(bbox_reg)
super().__init__(bbox_reg, 4)
class SSDLiteFeatureExtractorMobileNet(nn.Module):
def __init__(
self,
backbone: nn.Module,
c4_pos: int,
norm_layer: Callable[..., nn.Module],
width_mult: float = 1.0,
min_depth: int = 16,
):
super().__init__()
_log_api_usage_once(self)
if backbone[c4_pos].use_res_connect:
raise ValueError("backbone[c4_pos].use_res_connect should be False")
self.features = nn.Sequential(
            # As described in section 6.3 of the MobileNetV3 paper
nn.Sequential(*backbone[:c4_pos], backbone[c4_pos].block[0]), # from start until C4 expansion layer
nn.Sequential(backbone[c4_pos].block[1:], *backbone[c4_pos + 1 :]), # from C4 depthwise until end
)
get_depth = lambda d: max(min_depth, int(d * width_mult)) # noqa: E731
extra = nn.ModuleList(
[
_extra_block(backbone[-1].out_channels, get_depth(512), norm_layer),
_extra_block(get_depth(512), get_depth(256), norm_layer),
_extra_block(get_depth(256), get_depth(256), norm_layer),
_extra_block(get_depth(256), get_depth(128), norm_layer),
]
)
_normal_init(extra)
self.extra = extra
def forward(self, x: Tensor) -> Dict[str, Tensor]:
# Get feature maps from backbone and extra. Can't be refactored due to JIT limitations.
output = []
for block in self.features:
x = block(x)
output.append(x)
for block in self.extra:
x = block(x)
output.append(x)
return OrderedDict([(str(i), v) for i, v in enumerate(output)])
def _mobilenet_extractor(
backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
trainable_layers: int,
norm_layer: Callable[..., nn.Module],
):
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
num_stages = len(stage_indices)
# find the index of the layer from which we won't freeze
if not 0 <= trainable_layers <= num_stages:
raise ValueError("trainable_layers should be in the range [0, {num_stages}], instead got {trainable_layers}")
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
return SSDLiteFeatureExtractorMobileNet(backbone, stage_indices[-2], norm_layer)
class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum):
COCO_V1 = Weights(
url="https://download.pytorch.org/models/ssdlite320_mobilenet_v3_large_coco-a79551df.pth",
transforms=ObjectDetection,
meta={
"num_params": 3440060,
"categories": _COCO_CATEGORIES,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssdlite320-mobilenetv3-large",
"_metrics": {
"COCO-val2017": {
"box_map": 21.3,
}
},
"_ops": 0.583,
"_file_size": 13.418,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
DEFAULT = COCO_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", SSDLite320_MobileNet_V3_Large_Weights.COCO_V1),
weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def ssdlite320_mobilenet_v3_large(
*,
weights: Optional[SSDLite320_MobileNet_V3_Large_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
trainable_backbone_layers: Optional[int] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any,
) -> SSD:
"""SSDlite model architecture with input size 320x320 and a MobileNetV3 Large backbone, as
described at `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__ and
`MobileNetV2: Inverted Residuals and Linear Bottlenecks <https://arxiv.org/abs/1801.04381>`__.
.. betastatus:: detection module
See :func:`~torchvision.models.detection.ssd300_vgg16` for more details.
Example:
>>> model = torchvision.models.detection.ssdlite320_mobilenet_v3_large(weights=SSDLite320_MobileNet_V3_Large_Weights.DEFAULT)
>>> model.eval()
>>> x = [torch.rand(3, 320, 320), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
weights (:class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model
(including the background).
weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained
weights for the backbone.
trainable_backbone_layers (int, optional): number of trainable (not frozen) layers
starting from final block. Valid values are between 0 and 6, with 6 meaning all
backbone layers are trainable. If ``None`` is passed (the default) this value is
set to 6.
norm_layer (callable, optional): Module specifying the normalization layer to use.
**kwargs: parameters passed to the ``torchvision.models.detection.ssd.SSD``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/detection/ssdlite.py>`_
for more details about this class.
.. autoclass:: torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights
:members:
"""
weights = SSDLite320_MobileNet_V3_Large_Weights.verify(weights)
weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)
if "size" in kwargs:
warnings.warn("The size of the model is already fixed; ignoring the parameter.")
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 91
trainable_backbone_layers = _validate_trainable_layers(
weights is not None or weights_backbone is not None, trainable_backbone_layers, 6, 6
)
# Enable reduced tail if no pretrained backbone is selected. See Table 6 of MobileNetV3 paper.
reduce_tail = weights_backbone is None
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03)
backbone = mobilenet_v3_large(
weights=weights_backbone, progress=progress, norm_layer=norm_layer, reduced_tail=reduce_tail, **kwargs
)
if weights_backbone is None:
# Change the default initialization scheme if not pretrained
_normal_init(backbone)
backbone = _mobilenet_extractor(
backbone,
trainable_backbone_layers,
norm_layer,
)
size = (320, 320)
anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95)
out_channels = det_utils.retrieve_out_channels(backbone, size)
num_anchors = anchor_generator.num_anchors_per_location()
if len(out_channels) != len(anchor_generator.aspect_ratios):
raise ValueError(
f"The length of the output channels from the backbone {len(out_channels)} do not match the length of the anchor generator aspect ratios {len(anchor_generator.aspect_ratios)}"
)
defaults = {
"score_thresh": 0.001,
"nms_thresh": 0.55,
"detections_per_img": 300,
"topk_candidates": 300,
        # Rescale the input in a way compatible with the backbone:
# The following mean/std rescale the data from [0, 1] to [-1, 1]
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
kwargs: Any = {**defaults, **kwargs}
model = SSD(
backbone,
anchor_generator,
size,
num_classes,
head=SSDLiteHead(out_channels, num_anchors, num_classes, norm_layer),
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
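A minimal training-step sketch built only on the `ssdlite320_mobilenet_v3_large` builder above; the three-class setup, box coordinates, and labels are invented for illustration.
```py
import torch
from torchvision.models.detection import ssdlite320_mobilenet_v3_large

# Randomly initialised model (no weight downloads) with 3 classes including background.
model = ssdlite320_mobilenet_v3_large(weights=None, weights_backbone=None, num_classes=3)
model.train()

images = [torch.rand(3, 320, 320)]
targets = [{
    "boxes": torch.tensor([[30.0, 40.0, 120.0, 200.0]]),  # [x1, y1, x2, y2]
    "labels": torch.tensor([1], dtype=torch.int64),
}]

# In training mode the shared SSD base class returns the loss dict.
losses = model(images, targets)  # {"bbox_regression": ..., "classification": ...}
sum(losses.values()).backward()
```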
|
=================================================================================================================================
SOURCE CODE FILE: transform.py
LINES: 3
SIZE: 12.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\detection\transform.py
ENCODING: utf-8
```py
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torchvision
from torch import nn, Tensor
from .image_list import ImageList
from .roi_heads import paste_masks_in_image
@torch.jit.unused
def _get_shape_onnx(image: Tensor) -> Tensor:
from torch.onnx import operators
return operators.shape_as_tensor(image)[-2:]
@torch.jit.unused
def _fake_cast_onnx(v: Tensor) -> float:
# ONNX requires a tensor but here we fake its type for JIT.
return v
def _resize_image_and_masks(
image: Tensor,
self_min_size: int,
self_max_size: int,
target: Optional[Dict[str, Tensor]] = None,
fixed_size: Optional[Tuple[int, int]] = None,
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
if torchvision._is_tracing():
im_shape = _get_shape_onnx(image)
elif torch.jit.is_scripting():
im_shape = torch.tensor(image.shape[-2:])
else:
im_shape = image.shape[-2:]
size: Optional[List[int]] = None
scale_factor: Optional[float] = None
recompute_scale_factor: Optional[bool] = None
if fixed_size is not None:
size = [fixed_size[1], fixed_size[0]]
else:
if torch.jit.is_scripting() or torchvision._is_tracing():
min_size = torch.min(im_shape).to(dtype=torch.float32)
max_size = torch.max(im_shape).to(dtype=torch.float32)
self_min_size_f = float(self_min_size)
self_max_size_f = float(self_max_size)
scale = torch.min(self_min_size_f / min_size, self_max_size_f / max_size)
if torchvision._is_tracing():
scale_factor = _fake_cast_onnx(scale)
else:
scale_factor = scale.item()
else:
# Do it the normal way
min_size = min(im_shape)
max_size = max(im_shape)
scale_factor = min(self_min_size / min_size, self_max_size / max_size)
recompute_scale_factor = True
image = torch.nn.functional.interpolate(
image[None],
size=size,
scale_factor=scale_factor,
mode="bilinear",
recompute_scale_factor=recompute_scale_factor,
align_corners=False,
)[0]
if target is None:
return image, target
if "masks" in target:
mask = target["masks"]
mask = torch.nn.functional.interpolate(
mask[:, None].float(), size=size, scale_factor=scale_factor, recompute_scale_factor=recompute_scale_factor
)[:, 0].byte()
target["masks"] = mask
return image, target
class GeneralizedRCNNTransform(nn.Module):
"""
Performs input / target transformation before feeding the data to a GeneralizedRCNN
model.
The transformations it performs are:
- input normalization (mean subtraction and std division)
- input / target resizing to match min_size / max_size
    It returns an ImageList for the inputs, and a List[Dict[Tensor]] for the targets
"""
def __init__(
self,
min_size: int,
max_size: int,
image_mean: List[float],
image_std: List[float],
size_divisible: int = 32,
fixed_size: Optional[Tuple[int, int]] = None,
**kwargs: Any,
):
super().__init__()
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
self.image_mean = image_mean
self.image_std = image_std
self.size_divisible = size_divisible
self.fixed_size = fixed_size
self._skip_resize = kwargs.pop("_skip_resize", False)
def forward(
self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None
) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]:
images = [img for img in images]
if targets is not None:
# make a copy of targets to avoid modifying it in-place
# once torchscript supports dict comprehension
# this can be simplified as follows
# targets = [{k: v for k,v in t.items()} for t in targets]
targets_copy: List[Dict[str, Tensor]] = []
for t in targets:
data: Dict[str, Tensor] = {}
for k, v in t.items():
data[k] = v
targets_copy.append(data)
targets = targets_copy
for i in range(len(images)):
image = images[i]
target_index = targets[i] if targets is not None else None
if image.dim() != 3:
raise ValueError(f"images is expected to be a list of 3d tensors of shape [C, H, W], got {image.shape}")
image = self.normalize(image)
image, target_index = self.resize(image, target_index)
images[i] = image
if targets is not None and target_index is not None:
targets[i] = target_index
image_sizes = [img.shape[-2:] for img in images]
images = self.batch_images(images, size_divisible=self.size_divisible)
image_sizes_list: List[Tuple[int, int]] = []
for image_size in image_sizes:
torch._assert(
len(image_size) == 2,
f"Input tensors expected to have in the last two elements H and W, instead got {image_size}",
)
image_sizes_list.append((image_size[0], image_size[1]))
image_list = ImageList(images, image_sizes_list)
return image_list, targets
def normalize(self, image: Tensor) -> Tensor:
if not image.is_floating_point():
raise TypeError(
f"Expected input images to be of floating type (in range [0, 1]), "
f"but found type {image.dtype} instead"
)
dtype, device = image.dtype, image.device
mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)
std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
return (image - mean[:, None, None]) / std[:, None, None]
def torch_choice(self, k: List[int]) -> int:
"""
        Implements `random.choice` via torch ops so that it can be compiled with
        TorchScript and uses PyTorch's RNG (not Python's native RNG)
"""
index = int(torch.empty(1).uniform_(0.0, float(len(k))).item())
return k[index]
def resize(
self,
image: Tensor,
target: Optional[Dict[str, Tensor]] = None,
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
h, w = image.shape[-2:]
if self.training:
if self._skip_resize:
return image, target
size = self.torch_choice(self.min_size)
else:
size = self.min_size[-1]
image, target = _resize_image_and_masks(image, size, self.max_size, target, self.fixed_size)
if target is None:
return image, target
bbox = target["boxes"]
bbox = resize_boxes(bbox, (h, w), image.shape[-2:])
target["boxes"] = bbox
if "keypoints" in target:
keypoints = target["keypoints"]
keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:])
target["keypoints"] = keypoints
return image, target
# _onnx_batch_images() is an implementation of
# batch_images() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_batch_images(self, images: List[Tensor], size_divisible: int = 32) -> Tensor:
max_size = []
for i in range(images[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
stride = size_divisible
max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# which is not yet supported in onnx
padded_imgs = []
for img in images:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
return torch.stack(padded_imgs)
def max_by_axis(self, the_list: List[List[int]]) -> List[int]:
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def batch_images(self, images: List[Tensor], size_divisible: int = 32) -> Tensor:
if torchvision._is_tracing():
# batch_images() does not export well to ONNX
# call _onnx_batch_images() instead
return self._onnx_batch_images(images, size_divisible)
max_size = self.max_by_axis([list(img.shape) for img in images])
stride = float(size_divisible)
max_size = list(max_size)
max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)
max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)
batch_shape = [len(images)] + max_size
batched_imgs = images[0].new_full(batch_shape, 0)
for i in range(batched_imgs.shape[0]):
img = images[i]
batched_imgs[i, : img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
return batched_imgs
def postprocess(
self,
result: List[Dict[str, Tensor]],
image_shapes: List[Tuple[int, int]],
original_image_sizes: List[Tuple[int, int]],
) -> List[Dict[str, Tensor]]:
if self.training:
return result
for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):
boxes = pred["boxes"]
boxes = resize_boxes(boxes, im_s, o_im_s)
result[i]["boxes"] = boxes
if "masks" in pred:
masks = pred["masks"]
masks = paste_masks_in_image(masks, boxes, o_im_s)
result[i]["masks"] = masks
if "keypoints" in pred:
keypoints = pred["keypoints"]
keypoints = resize_keypoints(keypoints, im_s, o_im_s)
result[i]["keypoints"] = keypoints
return result
def __repr__(self) -> str:
format_string = f"{self.__class__.__name__}("
_indent = "\n "
format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})"
format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')"
format_string += "\n)"
return format_string
def resize_keypoints(keypoints: Tensor, original_size: List[int], new_size: List[int]) -> Tensor:
ratios = [
torch.tensor(s, dtype=torch.float32, device=keypoints.device)
/ torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_h, ratio_w = ratios
resized_data = keypoints.clone()
if torch._C._get_tracing_state():
resized_data_0 = resized_data[:, :, 0] * ratio_w
resized_data_1 = resized_data[:, :, 1] * ratio_h
resized_data = torch.stack((resized_data_0, resized_data_1, resized_data[:, :, 2]), dim=2)
else:
resized_data[..., 0] *= ratio_w
resized_data[..., 1] *= ratio_h
return resized_data
def resize_boxes(boxes: Tensor, original_size: List[int], new_size: List[int]) -> Tensor:
ratios = [
torch.tensor(s, dtype=torch.float32, device=boxes.device)
/ torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_height, ratio_width = ratios
xmin, ymin, xmax, ymax = boxes.unbind(1)
xmin = xmin * ratio_width
xmax = xmax * ratio_width
ymin = ymin * ratio_height
ymax = ymax * ratio_height
return torch.stack((xmin, ymin, xmax, ymax), dim=1)
```
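A short sketch of what `GeneralizedRCNNTransform` does to a list of differently sized images; the image sizes and the ImageNet normalization constants are arbitrary example values.
```py
import torch
from torchvision.models.detection.transform import GeneralizedRCNNTransform

transform = GeneralizedRCNNTransform(
    min_size=300, max_size=500,
    image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225],
)
transform.eval()  # use the deterministic (last) min_size instead of a random choice

images = [torch.rand(3, 200, 400), torch.rand(3, 480, 320)]
image_list, _ = transform(images, None)

print(image_list.image_sizes)    # per-image (H, W) after resizing, before padding
print(image_list.tensors.shape)  # one zero-padded batch tensor, H/W rounded up to multiples of 32
```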
|
==========================================================================================================================
SOURCE CODE FILE: efficientnet.py
LINES: 1
SIZE: 43.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\efficientnet.py
ENCODING: utf-8
```py
import copy
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth
from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = [
"EfficientNet",
"EfficientNet_B0_Weights",
"EfficientNet_B1_Weights",
"EfficientNet_B2_Weights",
"EfficientNet_B3_Weights",
"EfficientNet_B4_Weights",
"EfficientNet_B5_Weights",
"EfficientNet_B6_Weights",
"EfficientNet_B7_Weights",
"EfficientNet_V2_S_Weights",
"EfficientNet_V2_M_Weights",
"EfficientNet_V2_L_Weights",
"efficientnet_b0",
"efficientnet_b1",
"efficientnet_b2",
"efficientnet_b3",
"efficientnet_b4",
"efficientnet_b5",
"efficientnet_b6",
"efficientnet_b7",
"efficientnet_v2_s",
"efficientnet_v2_m",
"efficientnet_v2_l",
]
@dataclass
class _MBConvConfig:
expand_ratio: float
kernel: int
stride: int
input_channels: int
out_channels: int
num_layers: int
block: Callable[..., nn.Module]
@staticmethod
def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
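        # e.g. adjust_channels(32, width_mult=1.2) == 40: channels are scaled by width_mult and rounded to a multiple of 8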
return _make_divisible(channels * width_mult, 8, min_value)
class MBConvConfig(_MBConvConfig):
# Stores information listed at Table 1 of the EfficientNet paper & Table 4 of the EfficientNetV2 paper
def __init__(
self,
expand_ratio: float,
kernel: int,
stride: int,
input_channels: int,
out_channels: int,
num_layers: int,
width_mult: float = 1.0,
depth_mult: float = 1.0,
block: Optional[Callable[..., nn.Module]] = None,
) -> None:
input_channels = self.adjust_channels(input_channels, width_mult)
out_channels = self.adjust_channels(out_channels, width_mult)
num_layers = self.adjust_depth(num_layers, depth_mult)
if block is None:
block = MBConv
super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)
@staticmethod
def adjust_depth(num_layers: int, depth_mult: float):
return int(math.ceil(num_layers * depth_mult))
class FusedMBConvConfig(_MBConvConfig):
# Stores information listed at Table 4 of the EfficientNetV2 paper
def __init__(
self,
expand_ratio: float,
kernel: int,
stride: int,
input_channels: int,
out_channels: int,
num_layers: int,
block: Optional[Callable[..., nn.Module]] = None,
) -> None:
if block is None:
block = FusedMBConv
super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)
class MBConv(nn.Module):
def __init__(
self,
cnf: MBConvConfig,
stochastic_depth_prob: float,
norm_layer: Callable[..., nn.Module],
se_layer: Callable[..., nn.Module] = SqueezeExcitation,
) -> None:
super().__init__()
if not (1 <= cnf.stride <= 2):
raise ValueError("illegal stride value")
self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels
layers: List[nn.Module] = []
activation_layer = nn.SiLU
# expand
expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
if expanded_channels != cnf.input_channels:
layers.append(
Conv2dNormActivation(
cnf.input_channels,
expanded_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
# depthwise
layers.append(
Conv2dNormActivation(
expanded_channels,
expanded_channels,
kernel_size=cnf.kernel,
stride=cnf.stride,
groups=expanded_channels,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
# squeeze and excitation
squeeze_channels = max(1, cnf.input_channels // 4)
layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))
# project
layers.append(
Conv2dNormActivation(
expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
)
)
self.block = nn.Sequential(*layers)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
self.out_channels = cnf.out_channels
def forward(self, input: Tensor) -> Tensor:
result = self.block(input)
if self.use_res_connect:
result = self.stochastic_depth(result)
result += input
return result
class FusedMBConv(nn.Module):
def __init__(
self,
cnf: FusedMBConvConfig,
stochastic_depth_prob: float,
norm_layer: Callable[..., nn.Module],
) -> None:
super().__init__()
if not (1 <= cnf.stride <= 2):
raise ValueError("illegal stride value")
self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels
layers: List[nn.Module] = []
activation_layer = nn.SiLU
expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
if expanded_channels != cnf.input_channels:
# fused expand
layers.append(
Conv2dNormActivation(
cnf.input_channels,
expanded_channels,
kernel_size=cnf.kernel,
stride=cnf.stride,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
# project
layers.append(
Conv2dNormActivation(
expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
)
)
else:
layers.append(
Conv2dNormActivation(
cnf.input_channels,
cnf.out_channels,
kernel_size=cnf.kernel,
stride=cnf.stride,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
self.block = nn.Sequential(*layers)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
self.out_channels = cnf.out_channels
def forward(self, input: Tensor) -> Tensor:
result = self.block(input)
if self.use_res_connect:
result = self.stochastic_depth(result)
result += input
return result
class EfficientNet(nn.Module):
def __init__(
self,
inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
dropout: float,
stochastic_depth_prob: float = 0.2,
num_classes: int = 1000,
norm_layer: Optional[Callable[..., nn.Module]] = None,
last_channel: Optional[int] = None,
) -> None:
"""
EfficientNet V1 and V2 main class
Args:
inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The dropout probability
stochastic_depth_prob (float): The stochastic depth probability
num_classes (int): Number of classes
norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
last_channel (int): The number of channels on the penultimate layer
"""
super().__init__()
_log_api_usage_once(self)
if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
elif not (
isinstance(inverted_residual_setting, Sequence)
and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting])
):
raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")
if norm_layer is None:
norm_layer = nn.BatchNorm2d
layers: List[nn.Module] = []
# building first layer
firstconv_output_channels = inverted_residual_setting[0].input_channels
layers.append(
Conv2dNormActivation(
3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
)
)
# building inverted residual blocks
total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
stage_block_id = 0
for cnf in inverted_residual_setting:
stage: List[nn.Module] = []
for _ in range(cnf.num_layers):
# copy to avoid modifications. shallow copy is enough
block_cnf = copy.copy(cnf)
# overwrite info if not the first conv in the stage
if stage:
block_cnf.input_channels = block_cnf.out_channels
block_cnf.stride = 1
# adjust stochastic depth probability based on the depth of the stage block
sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks
stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
stage_block_id += 1
layers.append(nn.Sequential(*stage))
# building last several layers
lastconv_input_channels = inverted_residual_setting[-1].out_channels
lastconv_output_channels = last_channel if last_channel is not None else 4 * lastconv_input_channels
layers.append(
Conv2dNormActivation(
lastconv_input_channels,
lastconv_output_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=nn.SiLU,
)
)
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout, inplace=True),
nn.Linear(lastconv_output_channels, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
init_range = 1.0 / math.sqrt(m.out_features)
nn.init.uniform_(m.weight, -init_range, init_range)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _efficientnet(
inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
dropout: float,
last_channel: Optional[int],
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> EfficientNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = EfficientNet(inverted_residual_setting, dropout, last_channel=last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
def _efficientnet_conf(
arch: str,
**kwargs: Any,
) -> Tuple[Sequence[Union[MBConvConfig, FusedMBConvConfig]], Optional[int]]:
inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]]
if arch.startswith("efficientnet_b"):
bneck_conf = partial(MBConvConfig, width_mult=kwargs.pop("width_mult"), depth_mult=kwargs.pop("depth_mult"))
inverted_residual_setting = [
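            # columns: expand_ratio, kernel, stride, input_channels, out_channels, num_layers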
bneck_conf(1, 3, 1, 32, 16, 1),
bneck_conf(6, 3, 2, 16, 24, 2),
bneck_conf(6, 5, 2, 24, 40, 2),
bneck_conf(6, 3, 2, 40, 80, 3),
bneck_conf(6, 5, 1, 80, 112, 3),
bneck_conf(6, 5, 2, 112, 192, 4),
bneck_conf(6, 3, 1, 192, 320, 1),
]
last_channel = None
elif arch.startswith("efficientnet_v2_s"):
inverted_residual_setting = [
FusedMBConvConfig(1, 3, 1, 24, 24, 2),
FusedMBConvConfig(4, 3, 2, 24, 48, 4),
FusedMBConvConfig(4, 3, 2, 48, 64, 4),
MBConvConfig(4, 3, 2, 64, 128, 6),
MBConvConfig(6, 3, 1, 128, 160, 9),
MBConvConfig(6, 3, 2, 160, 256, 15),
]
last_channel = 1280
elif arch.startswith("efficientnet_v2_m"):
inverted_residual_setting = [
FusedMBConvConfig(1, 3, 1, 24, 24, 3),
FusedMBConvConfig(4, 3, 2, 24, 48, 5),
FusedMBConvConfig(4, 3, 2, 48, 80, 5),
MBConvConfig(4, 3, 2, 80, 160, 7),
MBConvConfig(6, 3, 1, 160, 176, 14),
MBConvConfig(6, 3, 2, 176, 304, 18),
MBConvConfig(6, 3, 1, 304, 512, 5),
]
last_channel = 1280
elif arch.startswith("efficientnet_v2_l"):
inverted_residual_setting = [
FusedMBConvConfig(1, 3, 1, 32, 32, 4),
FusedMBConvConfig(4, 3, 2, 32, 64, 7),
FusedMBConvConfig(4, 3, 2, 64, 96, 7),
MBConvConfig(4, 3, 2, 96, 192, 10),
MBConvConfig(6, 3, 1, 192, 224, 19),
MBConvConfig(6, 3, 2, 224, 384, 25),
MBConvConfig(6, 3, 1, 384, 640, 7),
]
last_channel = 1280
else:
raise ValueError(f"Unsupported model type {arch}")
return inverted_residual_setting, last_channel
_COMMON_META: Dict[str, Any] = {
"categories": _IMAGENET_CATEGORIES,
}
_COMMON_META_V1 = {
**_COMMON_META,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1",
}
_COMMON_META_V2 = {
**_COMMON_META,
"min_size": (33, 33),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2",
}
class EfficientNet_B0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/rwightman/pytorch-image-models/
url="https://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=256, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 5288548,
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.692,
"acc@5": 93.532,
}
},
"_ops": 0.386,
"_file_size": 20.451,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B1_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/rwightman/pytorch-image-models/
url="https://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth",
transforms=partial(
ImageClassification, crop_size=240, resize_size=256, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 7794184,
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.642,
"acc@5": 94.186,
}
},
"_ops": 0.687,
"_file_size": 30.134,
"_docs": """These weights are ported from the original paper.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth",
transforms=partial(
ImageClassification, crop_size=240, resize_size=255, interpolation=InterpolationMode.BILINEAR
),
meta={
**_COMMON_META_V1,
"num_params": 7794184,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.838,
"acc@5": 94.934,
}
},
"_ops": 0.687,
"_file_size": 30.136,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class EfficientNet_B2_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/rwightman/pytorch-image-models/
url="https://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pth",
transforms=partial(
ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 9109994,
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.608,
"acc@5": 95.310,
}
},
"_ops": 1.088,
"_file_size": 35.174,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B3_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/rwightman/pytorch-image-models/
url="https://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pth",
transforms=partial(
ImageClassification, crop_size=300, resize_size=320, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 12233232,
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.008,
"acc@5": 96.054,
}
},
"_ops": 1.827,
"_file_size": 47.184,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B4_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/rwightman/pytorch-image-models/
url="https://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pth",
transforms=partial(
ImageClassification, crop_size=380, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 19341616,
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.384,
"acc@5": 96.594,
}
},
"_ops": 4.394,
"_file_size": 74.489,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
url="https://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pth",
transforms=partial(
ImageClassification, crop_size=456, resize_size=456, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 30389784,
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.444,
"acc@5": 96.628,
}
},
"_ops": 10.266,
"_file_size": 116.864,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B6_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
url="https://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pth",
transforms=partial(
ImageClassification, crop_size=528, resize_size=528, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 43040704,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.008,
"acc@5": 96.916,
}
},
"_ops": 19.068,
"_file_size": 165.362,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_B7_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
url="https://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pth",
transforms=partial(
ImageClassification, crop_size=600, resize_size=600, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META_V1,
"num_params": 66347960,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.122,
"acc@5": 96.908,
}
},
"_ops": 37.746,
"_file_size": 254.675,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_V2_S_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pth",
transforms=partial(
ImageClassification,
crop_size=384,
resize_size=384,
interpolation=InterpolationMode.BILINEAR,
),
meta={
**_COMMON_META_V2,
"num_params": 21458488,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.228,
"acc@5": 96.878,
}
},
"_ops": 8.366,
"_file_size": 82.704,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_V2_M_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth",
transforms=partial(
ImageClassification,
crop_size=480,
resize_size=480,
interpolation=InterpolationMode.BILINEAR,
),
meta={
**_COMMON_META_V2,
"num_params": 54139356,
"_metrics": {
"ImageNet-1K": {
"acc@1": 85.112,
"acc@5": 97.156,
}
},
"_ops": 24.582,
"_file_size": 208.01,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class EfficientNet_V2_L_Weights(WeightsEnum):
# Weights ported from https://github.com/google/automl/tree/master/efficientnetv2
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/efficientnet_v2_l-59c71312.pth",
transforms=partial(
ImageClassification,
crop_size=480,
resize_size=480,
interpolation=InterpolationMode.BICUBIC,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
meta={
**_COMMON_META_V2,
"num_params": 118515272,
"_metrics": {
"ImageNet-1K": {
"acc@1": 85.808,
"acc@5": 97.788,
}
},
"_ops": 56.08,
"_file_size": 454.573,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
*, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B0_Weights
:members:
"""
weights = EfficientNet_B0_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
return _efficientnet(
inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
*, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B1_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B1_Weights
:members:
"""
weights = EfficientNet_B1_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
return _efficientnet(
inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
*, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B2_Weights
:members:
"""
weights = EfficientNet_B2_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
return _efficientnet(
inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
*, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B3_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B3_Weights
:members:
"""
weights = EfficientNet_B3_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.3),
last_channel,
weights,
progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
*, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B4_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B4_Weights
:members:
"""
weights = EfficientNet_B4_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.4),
last_channel,
weights,
progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
*, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B5_Weights
:members:
"""
weights = EfficientNet_B5_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.4),
last_channel,
weights,
progress,
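        # Note: the non-default BatchNorm settings below (eps=1e-3, momentum=0.01) correspond to a
        # decay of 0.99 in the TF reference implementation (PyTorch's momentum is 1 - TF decay).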
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
*, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B6_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B6_Weights
:members:
"""
weights = EfficientNet_B6_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.5),
last_channel,
weights,
progress,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
*, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.
Args:
weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_B7_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_B7_Weights
:members:
"""
weights = EfficientNet_B7_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.5),
last_channel,
weights,
progress,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
*, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""
Constructs an EfficientNetV2-S architecture from
`EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.
Args:
weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
:members:
"""
weights = EfficientNet_V2_S_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_s")
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.2),
last_channel,
weights,
progress,
norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
*, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""
Constructs an EfficientNetV2-M architecture from
`EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.
Args:
weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
:members:
"""
weights = EfficientNet_V2_M_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_m")
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.3),
last_channel,
weights,
progress,
norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
*, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
"""
Constructs an EfficientNetV2-L architecture from
`EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.
Args:
weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
:members:
"""
weights = EfficientNet_V2_L_Weights.verify(weights)
inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_l")
return _efficientnet(
inverted_residual_setting,
kwargs.pop("dropout", 0.4),
last_channel,
weights,
progress,
norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
**kwargs,
)
```
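A minimal usage sketch for the builders above (illustrative, not part of the source file; it assumes a torchvision install where these entry points are importable and network access for the one-off weight download):

```py
# Hypothetical usage sketch: load EfficientNet-B0 with its ImageNet-1K weights and run the
# preprocessing preset that is bundled with those weights.
import torch
from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights

weights = EfficientNet_B0_Weights.IMAGENET1K_V1
model = efficientnet_b0(weights=weights).eval()
preprocess = weights.transforms()       # resize / center-crop / normalize preset from the enum's meta

img = torch.rand(3, 300, 300)           # stand-in for a real image tensor
batch = preprocess(img).unsqueeze(0)
with torch.no_grad():
    logits = model(batch)
print(logits.shape)                     # expected: torch.Size([1, 1000])
```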
|
================================================================================================================================
SOURCE CODE FILE: feature_extraction.py
LINES: 1
SIZE: 27.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\feature_extraction.py
ENCODING: utf-8
```py
import copy
import inspect
import math
import re
import warnings
from collections import OrderedDict
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torchvision
from torch import fx, nn
from torch.fx.graph_module import _CodeOnlyModule, _copy_attr, _USER_PRESERVED_ATTRIBUTES_KEY
__all__ = ["create_feature_extractor", "get_graph_node_names"]
class LeafModuleAwareTracer(fx.Tracer):
"""
An fx.Tracer that allows the user to specify a set of leaf modules, i.e.
modules that are not to be traced through. The resulting graph ends up
having single nodes referencing calls to the leaf modules' forward methods.
"""
def __init__(self, *args, **kwargs):
self.leaf_modules = {}
if "leaf_modules" in kwargs:
leaf_modules = kwargs.pop("leaf_modules")
self.leaf_modules = leaf_modules
super().__init__(*args, **kwargs)
def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
if isinstance(m, tuple(self.leaf_modules)):
return True
return super().is_leaf_module(m, module_qualname)
class NodePathTracer(LeafModuleAwareTracer):
"""
NodePathTracer is an FX tracer that, for each operation, also records the
name of the Node from which the operation originated. A node name here is
a `.` separated path walking the hierarchy from top level module down to
leaf operation or leaf module. The name of the top level module is not
included as part of the node name. For example, if we trace a module whose
forward method applies a ReLU module, the name for that node will simply
be 'relu'.
Some notes on the specifics:
- Nodes are recorded to `self.node_to_qualname` which is a dictionary
mapping a given Node object to its node name.
- Nodes are recorded in the order which they are executed during
tracing.
- When a duplicate node name is encountered, a suffix of the form
_{int} is added. The counter starts from 1.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track the qualified name of the Node being traced
self.current_module_qualname = ""
        # A map from FX Node to the qualified name
# NOTE: This is loosely like the "qualified name" mentioned in the
# torch.fx docs https://pytorch.org/docs/stable/fx.html but adapted
# for the purposes of the torchvision feature extractor
self.node_to_qualname = OrderedDict()
def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
"""
Override of `fx.Tracer.call_module`
This override:
1) Stores away the qualified name of the caller for restoration later
2) Adds the qualified name of the caller to
`current_module_qualname` for retrieval by `create_proxy`
3) Once a leaf module is reached, calls `create_proxy`
4) Restores the caller's qualified name into current_module_qualname
"""
old_qualname = self.current_module_qualname
try:
module_qualname = self.path_of_module(m)
self.current_module_qualname = module_qualname
if not self.is_leaf_module(m, module_qualname):
out = forward(*args, **kwargs)
return out
return self.create_proxy("call_module", module_qualname, args, kwargs)
finally:
self.current_module_qualname = old_qualname
def create_proxy(
self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
) -> fx.proxy.Proxy:
"""
Override of `Tracer.create_proxy`. This override intercepts the recording
of every operation and stores away the current traced module's qualified
name in `node_to_qualname`
"""
proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
return proxy
def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
node_qualname = module_qualname
if node.op != "call_module":
# In this case module_qualname from torch.fx doesn't go all the
# way to the leaf function/op, so we need to append it
if len(node_qualname) > 0:
# Only append '.' if we are deeper than the top level module
node_qualname += "."
node_qualname += str(node)
# Now we need to add an _{index} postfix on any repeated node names
# For modules we do this from scratch
# But for anything else, torch.fx already has a globally scoped
# _{index} postfix. But we want it locally (relative to direct parent)
# scoped. So first we need to undo the torch.fx postfix
if re.match(r".+_[0-9]+$", node_qualname) is not None:
node_qualname = node_qualname.rsplit("_", 1)[0]
# ... and now we add on our own postfix
for existing_qualname in reversed(self.node_to_qualname.values()):
# Check to see if existing_qualname is of the form
# {node_qualname} or {node_qualname}_{int}
if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
postfix = existing_qualname.replace(node_qualname, "")
if len(postfix):
# existing_qualname is of the form {node_qualname}_{int}
next_index = int(postfix[1:]) + 1
else:
# existing_qualname is of the form {node_qualname}
next_index = 1
node_qualname += f"_{next_index}"
break
return node_qualname
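# Example of the naming scheme implemented above: if a submodule `layer1` applies a ReLU module
# twice, the recorded node names are 'layer1.relu' and 'layer1.relu_1'; the _{int} counter is
# scoped to the direct parent rather than to the whole graph.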
def _is_subseq(x, y):
"""Check if y is a subsequence of x
https://stackoverflow.com/a/24017747/4391249
"""
iter_x = iter(x)
return all(any(x_item == y_item for x_item in iter_x) for y_item in y)
def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathTracer):
"""
Utility function for warning the user if there are differences between
the train graph nodes and the eval graph nodes.
"""
train_nodes = list(train_tracer.node_to_qualname.values())
eval_nodes = list(eval_tracer.node_to_qualname.values())
if len(train_nodes) == len(eval_nodes) and all(t == e for t, e in zip(train_nodes, eval_nodes)):
return
suggestion_msg = (
"When choosing nodes for feature extraction, you may need to specify "
"output nodes for train and eval mode separately."
)
if _is_subseq(train_nodes, eval_nodes):
msg = (
"NOTE: The nodes obtained by tracing the model in eval mode "
"are a subsequence of those obtained in train mode. "
)
elif _is_subseq(eval_nodes, train_nodes):
msg = (
"NOTE: The nodes obtained by tracing the model in train mode "
"are a subsequence of those obtained in eval mode. "
)
else:
msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
warnings.warn(msg + suggestion_msg)
def _get_leaf_modules_for_ops() -> List[type]:
members = inspect.getmembers(torchvision.ops)
result = []
for _, obj in members:
if inspect.isclass(obj) and issubclass(obj, torch.nn.Module):
result.append(obj)
return result
def _set_default_tracer_kwargs(original_tr_kwargs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
default_autowrap_modules = (math, torchvision.ops)
default_leaf_modules = _get_leaf_modules_for_ops()
result_tracer_kwargs = {} if original_tr_kwargs is None else original_tr_kwargs
result_tracer_kwargs["autowrap_modules"] = (
tuple(set(result_tracer_kwargs["autowrap_modules"] + default_autowrap_modules))
if "autowrap_modules" in result_tracer_kwargs
else default_autowrap_modules
)
result_tracer_kwargs["leaf_modules"] = (
list(set(result_tracer_kwargs["leaf_modules"] + default_leaf_modules))
if "leaf_modules" in result_tracer_kwargs
else default_leaf_modules
)
return result_tracer_kwargs
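# Example (hypothetical user input): _set_default_tracer_kwargs({"leaf_modules": [MyCustomBlock]})
# yields a dict whose "leaf_modules" contains MyCustomBlock plus every torchvision.ops module and
# whose "autowrap_modules" falls back to the default (math, torchvision.ops).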
def get_graph_node_names(
model: nn.Module,
tracer_kwargs: Optional[Dict[str, Any]] = None,
suppress_diff_warning: bool = False,
concrete_args: Optional[Dict[str, Any]] = None,
) -> Tuple[List[str], List[str]]:
"""
Dev utility to return node names in order of execution. See note on node
names under :func:`create_feature_extractor`. Useful for seeing which node
names are available for feature extraction. There are two reasons that
node names can't easily be read directly from the code for a model:
1. Not all submodules are traced through. Modules from ``torch.nn`` all
fall within this category.
2. Nodes representing the repeated application of the same operation
or leaf module get a ``_{counter}`` postfix.
The model is traced twice: once in train mode, and once in eval mode. Both
sets of node names are returned.
For more details on the node naming conventions used here, please see the
:ref:`relevant subheading <about-node-names>` in the
`documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Args:
model (nn.Module): model for which we'd like to print node names
tracer_kwargs (dict, optional): a dictionary of keyword arguments for
``NodePathTracer`` (they are eventually passed onto
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
By default, it will be set to wrap and make leaf nodes all torchvision ops:
{"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: If the user provides ``tracer_kwargs``, the default arguments above will be appended to the
            user-provided dictionary.
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
not be treated as Proxies. According to the `Pytorch docs
<https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
this parameter's API may not be guaranteed.
Returns:
tuple(list, list): a list of node names from tracing the model in
train mode, and another from tracing the model in eval mode.
Examples::
>>> model = torchvision.models.resnet18()
>>> train_nodes, eval_nodes = get_graph_node_names(model)
"""
tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
is_training = model.training
train_tracer = NodePathTracer(**tracer_kwargs)
train_tracer.trace(model.train(), concrete_args=concrete_args)
eval_tracer = NodePathTracer(**tracer_kwargs)
eval_tracer.trace(model.eval(), concrete_args=concrete_args)
train_nodes = list(train_tracer.node_to_qualname.values())
eval_nodes = list(eval_tracer.node_to_qualname.values())
if not suppress_diff_warning:
_warn_graph_differences(train_tracer, eval_tracer)
# Restore training state
model.train(is_training)
return train_nodes, eval_nodes
class DualGraphModule(fx.GraphModule):
"""
A derivative of `fx.GraphModule`. Differs in the following ways:
- Requires a train and eval version of the underlying graph
- Copies submodules according to the nodes of both train and eval graphs.
- Calling train(mode) switches between train graph and eval graph.
"""
def __init__(
self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
):
"""
Args:
root (nn.Module): module from which the copied module hierarchy is
built
train_graph (fx.Graph): the graph that should be used in train mode
eval_graph (fx.Graph): the graph that should be used in eval mode
"""
super(fx.GraphModule, self).__init__()
self.__class__.__name__ = class_name
self.train_graph = train_graph
self.eval_graph = eval_graph
# Copy all get_attr and call_module ops (indicated by BOTH train and
# eval graphs)
for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
if node.op in ["get_attr", "call_module"]:
if not isinstance(node.target, str):
raise TypeError(f"node.target should be of type str instead of {type(node.target)}")
_copy_attr(root, self, node.target)
# train mode by default
self.train()
self.graph = train_graph
# (borrowed from fx.GraphModule):
# Store the Tracer class responsible for creating a Graph separately as part of the
# GraphModule state, except when the Tracer is defined in a local namespace.
# Locally defined Tracers are not pickleable. This is needed because torch.package will
# serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
# to re-create the Graph during deserialization.
if self.eval_graph._tracer_cls != self.train_graph._tracer_cls:
raise TypeError(
f"Train mode and eval mode should use the same tracer class. Instead got {self.eval_graph._tracer_cls} for eval vs {self.train_graph._tracer_cls} for train"
)
self._tracer_cls = None
if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
self._tracer_cls = self.graph._tracer_cls
def train(self, mode=True):
"""
Swap out the graph depending on the selected training mode.
NOTE this should be safe when calling model.eval() because that just
calls this with mode == False.
"""
# NOTE: Only set self.graph if the current graph is not the desired
# one. This saves us from recompiling the graph where not necessary.
if mode and not self.training:
self.graph = self.train_graph
elif not mode and self.training:
self.graph = self.eval_graph
return super().train(mode=mode)
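    # In practice: on an extractor built with separate train/eval return nodes, calling .train()
    # or .eval() on the resulting module transparently switches which recorded graph is executed.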
def _deepcopy_init(self):
# See __deepcopy__ below
return DualGraphModule.__init__
def __deepcopy__(self, memo):
# Same as the base class' __deepcopy__ from pytorch, with minor
# modification to account for train_graph and eval_graph
# https://github.com/pytorch/pytorch/blob/f684dbd0026f98f8fa291cab74dbc4d61ba30580/torch/fx/graph_module.py#L875
#
# This is using a bunch of private stuff from torch, so if that breaks,
# we'll likely have to remove this, along with the associated
# non-regression test.
res = type(self).__new__(type(self))
memo[id(self)] = res
fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo))
self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["train_graph"], fake_mod.__dict__["eval_graph"])
extra_preserved_attrs = [
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
"_replace_hook",
"_create_node_hooks",
"_erase_node_hooks",
]
for attr in extra_preserved_attrs:
if attr in self.__dict__:
setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo))
res.meta = copy.deepcopy(getattr(self, "meta", {}), memo)
if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
setattr(res, attr_name, attr)
return res
def create_feature_extractor(
model: nn.Module,
return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
tracer_kwargs: Optional[Dict[str, Any]] = None,
suppress_diff_warning: bool = False,
concrete_args: Optional[Dict[str, Any]] = None,
) -> fx.GraphModule:
"""
Creates a new graph module that returns intermediate nodes from a given
model as dictionary with user specified keys as strings, and the requested
outputs as values. This is achieved by re-writing the computation graph of
the model via FX to return the desired nodes as outputs. All unused nodes
are removed, together with their corresponding parameters.
Desired output nodes must be specified as a ``.`` separated
path walking the module hierarchy from top level module down to leaf
operation or leaf module. For more details on the node naming conventions
used here, please see the :ref:`relevant subheading <about-node-names>`
in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Not all models will be FX traceable, although with some massaging they can
be made to cooperate. Here's a (not exhaustive) list of tips:
- If you don't need to trace through a particular, problematic
sub-module, turn it into a "leaf module" by passing a list of
``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
It will not be traced through, but rather, the resulting graph will
hold a reference to that module's forward method.
- Likewise, you may turn functions into leaf functions by passing a
list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
example below).
- Some inbuilt Python functions can be problematic. For instance,
``int`` will raise an error during tracing. You may wrap them in your
own function and then pass that in ``autowrap_functions`` as one of
the ``tracer_kwargs``.
For further information on FX see the
`torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.
Args:
model (nn.Module): model on which we will extract the features
return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
containing the names (or partial names - see note above)
of the nodes for which the activations will be returned. If it is
a ``Dict``, the keys are the node names, and the values
are the user-specified keys for the graph module's returned
dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
node specification strings directly to output names. In the case
that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
this should not be specified.
train_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``eval_return_nodes`` must also be specified,
and ``return_nodes`` should not be specified.
eval_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``train_return_nodes`` must also be specified,
and `return_nodes` should not be specified.
tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
By default, it will be set to wrap and make leaf nodes all torchvision ops:
{"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: If the user provides ``tracer_kwargs``, the default arguments above will be appended to the
            user-provided dictionary.
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
not be treated as Proxies. According to the `Pytorch docs
<https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
this parameter's API may not be guaranteed.
Examples::
>>> # Feature extraction with resnet
>>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> model = create_feature_extractor(
>>> model, {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = model(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
>>> # Specifying leaf modules and leaf functions
>>> def leaf_function(x):
>>> # This would raise a TypeError if traced through
>>> return int(x)
>>>
>>> class LeafModule(torch.nn.Module):
>>> def forward(self, x):
>>> # This would raise a TypeError if traced through
>>> int(x.shape[0])
>>> return torch.nn.functional.relu(x + 4)
>>>
>>> class MyModule(torch.nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.conv = torch.nn.Conv2d(3, 1, 3)
>>> self.leaf_module = LeafModule()
>>>
>>> def forward(self, x):
>>> leaf_function(x.shape[0])
>>> x = self.conv(x)
>>> return self.leaf_module(x)
>>>
>>> model = create_feature_extractor(
>>> MyModule(), return_nodes=['leaf_module'],
>>> tracer_kwargs={'leaf_modules': [LeafModule],
>>> 'autowrap_functions': [leaf_function]})
"""
tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
is_training = model.training
if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):
raise ValueError(
"Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
)
if (train_return_nodes is None) ^ (eval_return_nodes is None):
raise ValueError(
"If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
)
if not ((return_nodes is None) ^ (train_return_nodes is None)):
raise ValueError("If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified")
# Put *_return_nodes into Dict[str, str] format
def to_strdict(n) -> Dict[str, str]:
if isinstance(n, list):
return {str(i): str(i) for i in n}
return {str(k): str(v) for k, v in n.items()}
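    # e.g. to_strdict(["layer1", "layer3"]) -> {"layer1": "layer1", "layer3": "layer3"}
    #      to_strdict({"layer1": "feat1"}) -> {"layer1": "feat1"}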
if train_return_nodes is None:
return_nodes = to_strdict(return_nodes)
train_return_nodes = deepcopy(return_nodes)
eval_return_nodes = deepcopy(return_nodes)
else:
train_return_nodes = to_strdict(train_return_nodes)
eval_return_nodes = to_strdict(eval_return_nodes)
# Repeat the tracing and graph rewriting for train and eval mode
tracers = {}
graphs = {}
mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
for mode in ["train", "eval"]:
if mode == "train":
model.train()
elif mode == "eval":
model.eval()
# Instantiate our NodePathTracer and use that to trace the model
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model, concrete_args=concrete_args)
name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
graph_module = fx.GraphModule(tracer.root, graph, name)
available_nodes = list(tracer.node_to_qualname.values())
# FIXME We don't know if we should expect this to happen
if len(set(available_nodes)) != len(available_nodes):
raise ValueError(
"There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
)
# Check that all outputs in return_nodes are present in the model
for query in mode_return_nodes[mode].keys():
# To check if a query is available we need to check that at least
# one of the available names starts with it up to a .
if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
raise ValueError(
f"node: '{query}' is not present in model. Hint: use "
"`get_graph_node_names` to make sure the "
"`return_nodes` you specified are present. It may even "
"be that you need to specify `train_return_nodes` and "
"`eval_return_nodes` separately."
)
# Remove existing output nodes (train mode)
orig_output_nodes = []
for n in reversed(graph_module.graph.nodes):
if n.op == "output":
orig_output_nodes.append(n)
if not orig_output_nodes:
raise ValueError("No output nodes found in graph_module.graph.nodes")
for n in orig_output_nodes:
graph_module.graph.erase_node(n)
# Find nodes corresponding to return_nodes and make them into output_nodes
nodes = [n for n in graph_module.graph.nodes]
output_nodes = OrderedDict()
for n in reversed(nodes):
module_qualname = tracer.node_to_qualname.get(n)
if module_qualname is None:
                # NOTE - Known cases where this happens:
# - Node representing creation of a tensor constant - probably
# not interesting as a return node
# - When packing outputs into a named tuple like in InceptionV3
continue
for query in mode_return_nodes[mode]:
depth = query.count(".")
if ".".join(module_qualname.split(".")[: depth + 1]) == query:
output_nodes[mode_return_nodes[mode][query]] = n
mode_return_nodes[mode].pop(query)
break
output_nodes = OrderedDict(reversed(list(output_nodes.items())))
# And add them in the end of the graph
with graph_module.graph.inserting_after(nodes[-1]):
graph_module.graph.output(output_nodes)
# Remove unused modules / parameters
graph_module.graph.eliminate_dead_code()
graph_module.recompile()
# Keep track of the tracer and graph, so we can choose the main one
tracers[mode] = tracer
graphs[mode] = graph
# Warn user if there are any discrepancies between the graphs of the
# train and eval modes
if not suppress_diff_warning:
_warn_graph_differences(tracers["train"], tracers["eval"])
# Build the final graph module
graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)
# Restore original training mode
model.train(is_training)
graph_module.train(is_training)
return graph_module
```
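A short end-to-end sketch tying the two public helpers of this module together (model and node names are illustrative and follow the docstring examples; the shapes noted in comments assume a 224x224 input to resnet18):

```py
# Illustrative sketch: list the available node names, then build a feature extractor.
import torch
import torchvision
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names

model = torchvision.models.resnet18()
train_nodes, eval_nodes = get_graph_node_names(model)   # typically identical lists for resnet18

# Keys are node names, values are the keys of the returned dict.
extractor = create_feature_extractor(model, return_nodes={"layer2": "mid", "layer4": "late"})

out = extractor(torch.rand(1, 3, 224, 224))
print({k: v.shape for k, v in out.items()})
# expected: {'mid': torch.Size([1, 128, 28, 28]), 'late': torch.Size([1, 512, 7, 7])}
```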
|
=======================================================================================================================
SOURCE CODE FILE: googlenet.py
LINES: 1
SIZE: 12.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\googlenet.py
ENCODING: utf-8
```py
import warnings
from collections import namedtuple
from functools import partial
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["GoogLeNet", "GoogLeNetOutputs", "_GoogLeNetOutputs", "GoogLeNet_Weights", "googlenet"]
GoogLeNetOutputs = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
GoogLeNetOutputs.__annotations__ = {"logits": Tensor, "aux_logits2": Optional[Tensor], "aux_logits1": Optional[Tensor]}
# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs
class GoogLeNet(nn.Module):
__constants__ = ["aux_logits", "transform_input"]
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
init_weights: Optional[bool] = None,
blocks: Optional[List[Callable[..., nn.Module]]] = None,
dropout: float = 0.2,
dropout_aux: float = 0.7,
) -> None:
super().__init__()
_log_api_usage_once(self)
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
if init_weights is None:
warnings.warn(
"The default weight initialization of GoogleNet will be changed in future releases of "
"torchvision. If you wish to keep the old behavior (which leads to long initialization times"
" due to scipy/scipy#11299), please set init_weights=True.",
FutureWarning,
)
init_weights = True
if len(blocks) != 3:
raise ValueError(f"blocks length should be 3 instead of {len(blocks)}")
conv_block = blocks[0]
inception_block = blocks[1]
inception_aux_block = blocks[2]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = conv_block(64, 64, kernel_size=1)
self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = inception_aux_block(512, num_classes, dropout=dropout_aux)
self.aux2 = inception_aux_block(528, num_classes, dropout=dropout_aux)
else:
self.aux1 = None # type: ignore[assignment]
self.aux2 = None # type: ignore[assignment]
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=dropout)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=0.01, a=-2, b=2)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _transform_input(self, x: Tensor) -> Tensor:
if self.transform_input:
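            # Re-map an input normalized with the ImageNet statistics (e.g. mean 0.485 / std 0.229
            # for the R channel) to the (x_raw - 0.5) / 0.5 normalization the ported weights expect:
            # x_imagenet * (std / 0.5) + (mean - 0.5) / 0.5 == (x_raw - 0.5) / 0.5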
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
return x
def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
aux1: Optional[Tensor] = None
if self.aux1 is not None:
if self.training:
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
aux2: Optional[Tensor] = None
if self.aux2 is not None:
if self.training:
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
return x, aux2, aux1
@torch.jit.unused
def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs:
if self.training and self.aux_logits:
return _GoogLeNetOutputs(x, aux2, aux1)
else:
return x # type: ignore[return-value]
def forward(self, x: Tensor) -> GoogLeNetOutputs:
x = self._transform_input(x)
x, aux2, aux1 = self._forward(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple")
return GoogLeNetOutputs(x, aux2, aux1)
else:
return self.eager_outputs(x, aux2, aux1)
class Inception(nn.Module):
def __init__(
self,
in_channels: int,
ch1x1: int,
ch3x3red: int,
ch3x3: int,
ch5x5red: int,
ch5x5: int,
pool_proj: int,
conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
conv_block(in_channels, ch3x3red, kernel_size=1), conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.branch3 = nn.Sequential(
conv_block(in_channels, ch5x5red, kernel_size=1),
# Here, kernel_size=3 instead of kernel_size=5 is a known bug.
# Please see https://github.com/pytorch/vision/issues/906 for details.
conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1),
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
conv_block(in_channels, pool_proj, kernel_size=1),
)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
conv_block: Optional[Callable[..., nn.Module]] = None,
dropout: float = 0.7,
) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv = conv_block(in_channels, 128, kernel_size=1)
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x: Tensor) -> Tensor:
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
# N x 1024
x = self.dropout(x)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class GoogLeNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/googlenet-1378be20.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 6624904,
"min_size": (15, 15),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#googlenet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.778,
"acc@5": 89.530,
}
},
"_ops": 1.498,
"_file_size": 49.731,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", GoogLeNet_Weights.IMAGENET1K_V1))
def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = True, **kwargs: Any) -> GoogLeNet:
"""GoogLeNet (Inception v1) model architecture from
`Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`_.
Args:
weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.GoogLeNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.GoogLeNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.GoogLeNet_Weights
:members:
"""
weights = GoogLeNet_Weights.verify(weights)
original_aux_logits = kwargs.get("aux_logits", False)
if weights is not None:
if "transform_input" not in kwargs:
_ovewrite_named_param(kwargs, "transform_input", True)
_ovewrite_named_param(kwargs, "aux_logits", True)
_ovewrite_named_param(kwargs, "init_weights", False)
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = GoogLeNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None # type: ignore[assignment]
model.aux2 = None # type: ignore[assignment]
else:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
)
return model
```
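A brief sketch of the ``forward()`` contract documented above: in eager train mode with ``aux_logits=True`` the model returns the ``GoogLeNetOutputs`` namedtuple, while in eval mode it returns a plain tensor (randomly initialized model, so no weight download is needed):

```py
# Illustrative sketch of GoogLeNet's train/eval output types.
import torch
from torchvision.models import GoogLeNet

model = GoogLeNet(aux_logits=True, init_weights=True)
x = torch.rand(2, 3, 224, 224)

model.train()
out = model(x)                          # GoogLeNetOutputs(logits=..., aux_logits2=..., aux_logits1=...)
print(type(out).__name__, out.logits.shape, out.aux_logits2.shape)

model.eval()
with torch.no_grad():
    logits = model(x)                   # plain Tensor once the aux heads are inactive
print(logits.shape)                     # expected: torch.Size([2, 1000])
```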
|
=======================================================================================================================
SOURCE CODE FILE: inception.py
LINES: 1
SIZE: 18.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\inception.py
ENCODING: utf-8
```py
import warnings
from collections import namedtuple
from functools import partial
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["Inception3", "InceptionOutputs", "_InceptionOutputs", "Inception_V3_Weights", "inception_v3"]
InceptionOutputs = namedtuple("InceptionOutputs", ["logits", "aux_logits"])
InceptionOutputs.__annotations__ = {"logits": Tensor, "aux_logits": Optional[Tensor]}
# Script annotations failed with _InceptionOutputs = namedtuple ...
# _InceptionOutputs set here for backwards compat
_InceptionOutputs = InceptionOutputs
class Inception3(nn.Module):
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
inception_blocks: Optional[List[Callable[..., nn.Module]]] = None,
init_weights: Optional[bool] = None,
dropout: float = 0.5,
) -> None:
super().__init__()
_log_api_usage_once(self)
if inception_blocks is None:
inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
if init_weights is None:
warnings.warn(
"The default weight initialization of inception_v3 will be changed in future releases of "
"torchvision. If you wish to keep the old behavior (which leads to long initialization times"
" due to scipy/scipy#11299), please set init_weights=True.",
FutureWarning,
)
init_weights = True
if len(inception_blocks) != 7:
raise ValueError(f"length of inception_blocks should be 7 instead of {len(inception_blocks)}")
conv_block = inception_blocks[0]
inception_a = inception_blocks[1]
inception_b = inception_blocks[2]
inception_c = inception_blocks[3]
inception_d = inception_blocks[4]
inception_e = inception_blocks[5]
inception_aux = inception_blocks[6]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = inception_a(192, pool_features=32)
self.Mixed_5c = inception_a(256, pool_features=64)
self.Mixed_5d = inception_a(288, pool_features=64)
self.Mixed_6a = inception_b(288)
self.Mixed_6b = inception_c(768, channels_7x7=128)
self.Mixed_6c = inception_c(768, channels_7x7=160)
self.Mixed_6d = inception_c(768, channels_7x7=160)
self.Mixed_6e = inception_c(768, channels_7x7=192)
self.AuxLogits: Optional[nn.Module] = None
if aux_logits:
self.AuxLogits = inception_aux(768, num_classes)
self.Mixed_7a = inception_d(768)
self.Mixed_7b = inception_e(1280)
self.Mixed_7c = inception_e(2048)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=dropout)
self.fc = nn.Linear(2048, num_classes)
if init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
stddev = float(m.stddev) if hasattr(m, "stddev") else 0.1 # type: ignore
torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=stddev, a=-2, b=2)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _transform_input(self, x: Tensor) -> Tensor:
if self.transform_input:
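            # Same ImageNet -> (0.5, 0.5) normalization re-mapping as in GoogLeNet._transform_input.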
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
return x
def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = self.maxpool1(x)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = self.maxpool2(x)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
x = self.Mixed_5c(x)
# N x 288 x 35 x 35
x = self.Mixed_5d(x)
# N x 288 x 35 x 35
x = self.Mixed_6a(x)
# N x 768 x 17 x 17
x = self.Mixed_6b(x)
# N x 768 x 17 x 17
x = self.Mixed_6c(x)
# N x 768 x 17 x 17
x = self.Mixed_6d(x)
# N x 768 x 17 x 17
x = self.Mixed_6e(x)
# N x 768 x 17 x 17
aux: Optional[Tensor] = None
if self.AuxLogits is not None:
if self.training:
aux = self.AuxLogits(x)
# N x 768 x 17 x 17
x = self.Mixed_7a(x)
# N x 1280 x 8 x 8
x = self.Mixed_7b(x)
# N x 2048 x 8 x 8
x = self.Mixed_7c(x)
# N x 2048 x 8 x 8
# Adaptive average pooling
x = self.avgpool(x)
# N x 2048 x 1 x 1
x = self.dropout(x)
# N x 2048 x 1 x 1
x = torch.flatten(x, 1)
# N x 2048
x = self.fc(x)
# N x 1000 (num_classes)
return x, aux
@torch.jit.unused
def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs:
if self.training and self.aux_logits:
return InceptionOutputs(x, aux)
else:
return x # type: ignore[return-value]
def forward(self, x: Tensor) -> InceptionOutputs:
x = self._transform_input(x)
x, aux = self._forward(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted Inception3 always returns Inception3 Tuple")
return InceptionOutputs(x, aux)
else:
return self.eager_outputs(x, aux)
class InceptionA(nn.Module):
def __init__(
self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(
self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(
self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
self.conv1 = conv_block(128, 768, kernel_size=5)
self.conv1.stddev = 0.01 # type: ignore[assignment]
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001 # type: ignore[assignment]
def forward(self, x: Tensor) -> Tensor:
# N x 768 x 17 x 17
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 768 x 1 x 1
x = torch.flatten(x, 1)
# N x 768
x = self.fc(x)
# N x 1000
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class Inception_V3_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth",
transforms=partial(ImageClassification, crop_size=299, resize_size=342),
meta={
"num_params": 27161264,
"min_size": (75, 75),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#inception-v3",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.294,
"acc@5": 93.450,
}
},
"_ops": 5.713,
"_file_size": 103.903,
"_docs": """These weights are ported from the original paper.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", Inception_V3_Weights.IMAGENET1K_V1))
def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3:
"""
Inception v3 model architecture from
`Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`_.
.. note::
        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
weights (:class:`~torchvision.models.Inception_V3_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.Inception_V3_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.Inception3``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Inception_V3_Weights
:members:
"""
weights = Inception_V3_Weights.verify(weights)
original_aux_logits = kwargs.get("aux_logits", True)
if weights is not None:
if "transform_input" not in kwargs:
_ovewrite_named_param(kwargs, "transform_input", True)
_ovewrite_named_param(kwargs, "aux_logits", True)
_ovewrite_named_param(kwargs, "init_weights", False)
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = Inception3(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if not original_aux_logits:
model.aux_logits = False
model.AuxLogits = None
return model
```
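For quick reference, a minimal usage sketch of the ``inception_v3`` builder defined above (assuming a standard torchvision install); the 299 x 299 input requirement and the training-time ``InceptionOutputs`` tuple follow the docstring and ``forward`` implementation in the file.

```py
import torch
from torchvision.models import inception_v3, Inception_V3_Weights

# Build the pretrained model and switch to inference mode.
model = inception_v3(weights=Inception_V3_Weights.IMAGENET1K_V1)
model.eval()

# Inception v3 expects N x 3 x 299 x 299 inputs.
x = torch.rand(1, 3, 299, 299)
with torch.no_grad():
    logits = model(x)  # plain Tensor of shape (1, 1000) in eval mode

# In training mode with aux_logits=True, forward() instead returns an
# InceptionOutputs named tuple: (logits, aux_logits).
```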
|
====================================================================================================================
SOURCE CODE FILE: maxvit.py
LINES: 2
SIZE: 32.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\maxvit.py
ENCODING: utf-8
```py
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchvision.models._api import register_model, Weights, WeightsEnum
from torchvision.models._meta import _IMAGENET_CATEGORIES
from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
from torchvision.ops.misc import Conv2dNormActivation, SqueezeExcitation
from torchvision.ops.stochastic_depth import StochasticDepth
from torchvision.transforms._presets import ImageClassification, InterpolationMode
from torchvision.utils import _log_api_usage_once
__all__ = [
"MaxVit",
"MaxVit_T_Weights",
"maxvit_t",
]
def _get_conv_output_shape(input_size: Tuple[int, int], kernel_size: int, stride: int, padding: int) -> Tuple[int, int]:
return (
(input_size[0] - kernel_size + 2 * padding) // stride + 1,
(input_size[1] - kernel_size + 2 * padding) // stride + 1,
)
def _make_block_input_shapes(input_size: Tuple[int, int], n_blocks: int) -> List[Tuple[int, int]]:
"""Util function to check that the input size is correct for a MaxVit configuration."""
shapes = []
block_input_shape = _get_conv_output_shape(input_size, 3, 2, 1)
for _ in range(n_blocks):
block_input_shape = _get_conv_output_shape(block_input_shape, 3, 2, 1)
shapes.append(block_input_shape)
return shapes
def _get_relative_position_index(height: int, width: int) -> torch.Tensor:
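    # Builds a (height * width, height * width) lookup table mapping every
    # (query, key) position pair inside a window to a single index in
    # [0, (2 * height - 1) * (2 * width - 1)); RelativePositionalMultiHeadAttention
    # uses it to gather the learned relative position bias for each pair.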
coords = torch.stack(torch.meshgrid([torch.arange(height), torch.arange(width)], indexing="ij"))
coords_flat = torch.flatten(coords, 1)
relative_coords = coords_flat[:, :, None] - coords_flat[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += height - 1
relative_coords[:, :, 1] += width - 1
relative_coords[:, :, 0] *= 2 * width - 1
return relative_coords.sum(-1)
class MBConv(nn.Module):
"""MBConv: Mobile Inverted Residual Bottleneck.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
stride (int): Stride of the depthwise convolution.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
p_stochastic_dropout (float): Probability of stochastic depth.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
expansion_ratio: float,
squeeze_ratio: float,
stride: int,
activation_layer: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
p_stochastic_dropout: float = 0.0,
) -> None:
super().__init__()
proj: Sequence[nn.Module]
self.proj: nn.Module
should_proj = stride != 1 or in_channels != out_channels
if should_proj:
proj = [nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=True)]
if stride == 2:
proj = [nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)] + proj # type: ignore
self.proj = nn.Sequential(*proj)
else:
self.proj = nn.Identity() # type: ignore
mid_channels = int(out_channels * expansion_ratio)
sqz_channels = int(out_channels * squeeze_ratio)
if p_stochastic_dropout:
self.stochastic_depth = StochasticDepth(p_stochastic_dropout, mode="row") # type: ignore
else:
self.stochastic_depth = nn.Identity() # type: ignore
_layers = OrderedDict()
_layers["pre_norm"] = norm_layer(in_channels)
_layers["conv_a"] = Conv2dNormActivation(
in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
activation_layer=activation_layer,
norm_layer=norm_layer,
inplace=None,
)
_layers["conv_b"] = Conv2dNormActivation(
mid_channels,
mid_channels,
kernel_size=3,
stride=stride,
padding=1,
activation_layer=activation_layer,
norm_layer=norm_layer,
groups=mid_channels,
inplace=None,
)
_layers["squeeze_excitation"] = SqueezeExcitation(mid_channels, sqz_channels, activation=nn.SiLU)
_layers["conv_c"] = nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, bias=True)
self.layers = nn.Sequential(_layers)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
Returns:
Tensor: Output tensor with expected layout of [B, C, H / stride, W / stride].
"""
res = self.proj(x)
x = self.stochastic_depth(self.layers(x))
return res + x
class RelativePositionalMultiHeadAttention(nn.Module):
"""Relative Positional Multi-Head Attention.
Args:
feat_dim (int): Number of input features.
head_dim (int): Number of features per head.
max_seq_len (int): Maximum sequence length.
"""
def __init__(
self,
feat_dim: int,
head_dim: int,
max_seq_len: int,
) -> None:
super().__init__()
if feat_dim % head_dim != 0:
raise ValueError(f"feat_dim: {feat_dim} must be divisible by head_dim: {head_dim}")
self.n_heads = feat_dim // head_dim
self.head_dim = head_dim
self.size = int(math.sqrt(max_seq_len))
self.max_seq_len = max_seq_len
self.to_qkv = nn.Linear(feat_dim, self.n_heads * self.head_dim * 3)
self.scale_factor = feat_dim**-0.5
self.merge = nn.Linear(self.head_dim * self.n_heads, feat_dim)
self.relative_position_bias_table = nn.parameter.Parameter(
torch.empty(((2 * self.size - 1) * (2 * self.size - 1), self.n_heads), dtype=torch.float32),
)
self.register_buffer("relative_position_index", _get_relative_position_index(self.size, self.size))
# initialize with truncated normal the bias
torch.nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)
def get_relative_positional_bias(self) -> torch.Tensor:
bias_index = self.relative_position_index.view(-1) # type: ignore
relative_bias = self.relative_position_bias_table[bias_index].view(self.max_seq_len, self.max_seq_len, -1) # type: ignore
relative_bias = relative_bias.permute(2, 0, 1).contiguous()
return relative_bias.unsqueeze(0)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, G, P, D].
Returns:
Tensor: Output tensor with expected layout of [B, G, P, D].
"""
B, G, P, D = x.shape
H, DH = self.n_heads, self.head_dim
qkv = self.to_qkv(x)
q, k, v = torch.chunk(qkv, 3, dim=-1)
q = q.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
k = k.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
v = v.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
k = k * self.scale_factor
dot_prod = torch.einsum("B G H I D, B G H J D -> B G H I J", q, k)
pos_bias = self.get_relative_positional_bias()
dot_prod = F.softmax(dot_prod + pos_bias, dim=-1)
out = torch.einsum("B G H I J, B G H J D -> B G H I D", dot_prod, v)
out = out.permute(0, 1, 3, 2, 4).reshape(B, G, P, D)
out = self.merge(out)
return out
class SwapAxes(nn.Module):
"""Permute the axes of a tensor."""
def __init__(self, a: int, b: int) -> None:
super().__init__()
self.a = a
self.b = b
def forward(self, x: torch.Tensor) -> torch.Tensor:
res = torch.swapaxes(x, self.a, self.b)
return res
class WindowPartition(nn.Module):
"""
Partition the input tensor into non-overlapping windows.
"""
def __init__(self) -> None:
super().__init__()
def forward(self, x: Tensor, p: int) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
p (int): Number of partitions.
Returns:
Tensor: Output tensor with expected layout of [B, H/P, W/P, P*P, C].
"""
B, C, H, W = x.shape
P = p
# chunk up H and W dimensions
x = x.reshape(B, C, H // P, P, W // P, P)
x = x.permute(0, 2, 4, 3, 5, 1)
        # collapse P * P dimension
x = x.reshape(B, (H // P) * (W // P), P * P, C)
return x
class WindowDepartition(nn.Module):
"""
Departition the input tensor of non-overlapping windows into a feature volume of layout [B, C, H, W].
"""
def __init__(self) -> None:
super().__init__()
def forward(self, x: Tensor, p: int, h_partitions: int, w_partitions: int) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, (H/P * W/P), P*P, C].
p (int): Number of partitions.
h_partitions (int): Number of vertical partitions.
w_partitions (int): Number of horizontal partitions.
Returns:
Tensor: Output tensor with expected layout of [B, C, H, W].
"""
B, G, PP, C = x.shape
P = p
HP, WP = h_partitions, w_partitions
        # split P * P dimension into 2 P tile dimensions
x = x.reshape(B, HP, WP, P, P, C)
# permute into B, C, HP, P, WP, P
x = x.permute(0, 5, 1, 3, 2, 4)
# reshape into B, C, H, W
x = x.reshape(B, C, HP * P, WP * P)
return x
class PartitionAttentionLayer(nn.Module):
"""
Layer for partitioning the input tensor into non-overlapping windows and applying attention to each window.
Args:
in_channels (int): Number of input channels.
head_dim (int): Dimension of each attention head.
partition_size (int): Size of the partitions.
partition_type (str): Type of partitioning to use. Can be either "grid" or "window".
grid_size (Tuple[int, int]): Size of the grid to partition the input tensor into.
mlp_ratio (int): Ratio of the feature size expansion in the MLP layer.
activation_layer (Callable[..., nn.Module]): Activation function to use.
norm_layer (Callable[..., nn.Module]): Normalization function to use.
attention_dropout (float): Dropout probability for the attention layer.
mlp_dropout (float): Dropout probability for the MLP layer.
        p_stochastic_dropout (float): Probability of stochastic depth applied to the attention and MLP residual branches.
"""
def __init__(
self,
in_channels: int,
head_dim: int,
# partitioning parameters
partition_size: int,
partition_type: str,
# grid size needs to be known at initialization time
        # because we need to know how many relative offsets there are in the grid
grid_size: Tuple[int, int],
mlp_ratio: int,
activation_layer: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
attention_dropout: float,
mlp_dropout: float,
p_stochastic_dropout: float,
) -> None:
super().__init__()
self.n_heads = in_channels // head_dim
self.head_dim = head_dim
self.n_partitions = grid_size[0] // partition_size
self.partition_type = partition_type
self.grid_size = grid_size
if partition_type not in ["grid", "window"]:
raise ValueError("partition_type must be either 'grid' or 'window'")
if partition_type == "window":
self.p, self.g = partition_size, self.n_partitions
else:
self.p, self.g = self.n_partitions, partition_size
self.partition_op = WindowPartition()
self.departition_op = WindowDepartition()
self.partition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
self.departition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
self.attn_layer = nn.Sequential(
norm_layer(in_channels),
# it's always going to be partition_size ** 2 because
# of the axis swap in the case of grid partitioning
RelativePositionalMultiHeadAttention(in_channels, head_dim, partition_size**2),
nn.Dropout(attention_dropout),
)
# pre-normalization similar to transformer layers
self.mlp_layer = nn.Sequential(
nn.LayerNorm(in_channels),
nn.Linear(in_channels, in_channels * mlp_ratio),
activation_layer(),
nn.Linear(in_channels * mlp_ratio, in_channels),
nn.Dropout(mlp_dropout),
)
        # stochastic depth applied to the residual branches below
self.stochastic_dropout = StochasticDepth(p_stochastic_dropout, mode="row")
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
Returns:
Tensor: Output tensor with expected layout of [B, C, H, W].
"""
# Undefined behavior if H or W are not divisible by p
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
gh, gw = self.grid_size[0] // self.p, self.grid_size[1] // self.p
torch._assert(
self.grid_size[0] % self.p == 0 and self.grid_size[1] % self.p == 0,
"Grid size must be divisible by partition size. Got grid size of {} and partition size of {}".format(
self.grid_size, self.p
),
)
x = self.partition_op(x, self.p)
x = self.partition_swap(x)
x = x + self.stochastic_dropout(self.attn_layer(x))
x = x + self.stochastic_dropout(self.mlp_layer(x))
x = self.departition_swap(x)
x = self.departition_op(x, self.p, gh, gw)
return x
class MaxVitLayer(nn.Module):
"""
MaxVit layer consisting of a MBConv layer followed by a PartitionAttentionLayer with `window` and a PartitionAttentionLayer with `grid`.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
stride (int): Stride of the depthwise convolution.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Ratio of the MLP layer.
mlp_dropout (float): Dropout probability for the MLP layer.
attention_dropout (float): Dropout probability for the attention layer.
p_stochastic_dropout (float): Probability of stochastic depth.
partition_size (int): Size of the partitions.
grid_size (Tuple[int, int]): Size of the input feature grid.
"""
def __init__(
self,
# conv parameters
in_channels: int,
out_channels: int,
squeeze_ratio: float,
expansion_ratio: float,
stride: int,
# conv + transformer parameters
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
# transformer parameters
head_dim: int,
mlp_ratio: int,
mlp_dropout: float,
attention_dropout: float,
p_stochastic_dropout: float,
# partitioning parameters
partition_size: int,
grid_size: Tuple[int, int],
) -> None:
super().__init__()
layers: OrderedDict = OrderedDict()
# convolutional layer
layers["MBconv"] = MBConv(
in_channels=in_channels,
out_channels=out_channels,
expansion_ratio=expansion_ratio,
squeeze_ratio=squeeze_ratio,
stride=stride,
activation_layer=activation_layer,
norm_layer=norm_layer,
p_stochastic_dropout=p_stochastic_dropout,
)
# attention layers, block -> grid
layers["window_attention"] = PartitionAttentionLayer(
in_channels=out_channels,
head_dim=head_dim,
partition_size=partition_size,
partition_type="window",
grid_size=grid_size,
mlp_ratio=mlp_ratio,
activation_layer=activation_layer,
norm_layer=nn.LayerNorm,
attention_dropout=attention_dropout,
mlp_dropout=mlp_dropout,
p_stochastic_dropout=p_stochastic_dropout,
)
layers["grid_attention"] = PartitionAttentionLayer(
in_channels=out_channels,
head_dim=head_dim,
partition_size=partition_size,
partition_type="grid",
grid_size=grid_size,
mlp_ratio=mlp_ratio,
activation_layer=activation_layer,
norm_layer=nn.LayerNorm,
attention_dropout=attention_dropout,
mlp_dropout=mlp_dropout,
p_stochastic_dropout=p_stochastic_dropout,
)
self.layers = nn.Sequential(layers)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor of shape (B, C, H, W).
Returns:
Tensor: Output tensor of shape (B, C, H, W).
"""
x = self.layers(x)
return x
class MaxVitBlock(nn.Module):
"""
A MaxVit block consisting of `n_layers` MaxVit layers.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Ratio of the MLP layer.
mlp_dropout (float): Dropout probability for the MLP layer.
attention_dropout (float): Dropout probability for the attention layer.
p_stochastic_dropout (float): Probability of stochastic depth.
partition_size (int): Size of the partitions.
input_grid_size (Tuple[int, int]): Size of the input feature grid.
n_layers (int): Number of layers in the block.
p_stochastic (List[float]): List of probabilities for stochastic depth for each layer.
"""
def __init__(
self,
# conv parameters
in_channels: int,
out_channels: int,
squeeze_ratio: float,
expansion_ratio: float,
# conv + transformer parameters
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
# transformer parameters
head_dim: int,
mlp_ratio: int,
mlp_dropout: float,
attention_dropout: float,
# partitioning parameters
partition_size: int,
input_grid_size: Tuple[int, int],
# number of layers
n_layers: int,
p_stochastic: List[float],
) -> None:
super().__init__()
if not len(p_stochastic) == n_layers:
raise ValueError(f"p_stochastic must have length n_layers={n_layers}, got p_stochastic={p_stochastic}.")
self.layers = nn.ModuleList()
# account for the first stride of the first layer
self.grid_size = _get_conv_output_shape(input_grid_size, kernel_size=3, stride=2, padding=1)
for idx, p in enumerate(p_stochastic):
stride = 2 if idx == 0 else 1
self.layers += [
MaxVitLayer(
in_channels=in_channels if idx == 0 else out_channels,
out_channels=out_channels,
squeeze_ratio=squeeze_ratio,
expansion_ratio=expansion_ratio,
stride=stride,
norm_layer=norm_layer,
activation_layer=activation_layer,
head_dim=head_dim,
mlp_ratio=mlp_ratio,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
partition_size=partition_size,
grid_size=self.grid_size,
p_stochastic_dropout=p,
),
]
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor of shape (B, C, H, W).
Returns:
Tensor: Output tensor of shape (B, C, H, W).
"""
for layer in self.layers:
x = layer(x)
return x
class MaxVit(nn.Module):
"""
Implements MaxVit Transformer from the `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_ paper.
Args:
input_size (Tuple[int, int]): Size of the input image.
stem_channels (int): Number of channels in the stem.
partition_size (int): Size of the partitions.
block_channels (List[int]): Number of channels in each block.
block_layers (List[int]): Number of layers in each block.
stochastic_depth_prob (float): Probability of stochastic depth. Expands to a list of probabilities for each layer that scales linearly to the specified value.
squeeze_ratio (float): Squeeze ratio in the SE Layer. Default: 0.25.
expansion_ratio (float): Expansion ratio in the MBConv bottleneck. Default: 4.
norm_layer (Callable[..., nn.Module]): Normalization function. Default: None (setting to None will produce a `BatchNorm2d(eps=1e-3, momentum=0.01)`).
        activation_layer (Callable[..., nn.Module]): Activation function. Default: nn.GELU.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Expansion ratio of the MLP layer. Default: 4.
mlp_dropout (float): Dropout probability for the MLP layer. Default: 0.0.
attention_dropout (float): Dropout probability for the attention layer. Default: 0.0.
num_classes (int): Number of classes. Default: 1000.
"""
def __init__(
self,
# input size parameters
input_size: Tuple[int, int],
# stem and task parameters
stem_channels: int,
# partitioning parameters
partition_size: int,
# block parameters
block_channels: List[int],
block_layers: List[int],
# attention head dimensions
head_dim: int,
stochastic_depth_prob: float,
# conv + transformer parameters
# norm_layer is applied only to the conv layers
# activation_layer is applied both to conv and transformer layers
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer: Callable[..., nn.Module] = nn.GELU,
# conv parameters
squeeze_ratio: float = 0.25,
expansion_ratio: float = 4,
# transformer parameters
mlp_ratio: int = 4,
mlp_dropout: float = 0.0,
attention_dropout: float = 0.0,
# task parameters
num_classes: int = 1000,
) -> None:
super().__init__()
_log_api_usage_once(self)
input_channels = 3
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1029-L1030
# for the exact parameters used in batchnorm
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01)
# Make sure input size will be divisible by the partition size in all blocks
# Undefined behavior if H or W are not divisible by p
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
block_input_sizes = _make_block_input_shapes(input_size, len(block_channels))
for idx, block_input_size in enumerate(block_input_sizes):
if block_input_size[0] % partition_size != 0 or block_input_size[1] % partition_size != 0:
raise ValueError(
f"Input size {block_input_size} of block {idx} is not divisible by partition size {partition_size}. "
f"Consider changing the partition size or the input size.\n"
f"Current configuration yields the following block input sizes: {block_input_sizes}."
)
# stem
self.stem = nn.Sequential(
Conv2dNormActivation(
input_channels,
stem_channels,
3,
stride=2,
norm_layer=norm_layer,
activation_layer=activation_layer,
bias=False,
inplace=None,
),
Conv2dNormActivation(
stem_channels, stem_channels, 3, stride=1, norm_layer=None, activation_layer=None, bias=True
),
)
# account for stem stride
input_size = _get_conv_output_shape(input_size, kernel_size=3, stride=2, padding=1)
self.partition_size = partition_size
# blocks
self.blocks = nn.ModuleList()
in_channels = [stem_channels] + block_channels[:-1]
out_channels = block_channels
        # precompute the stochastic depth probabilities from 0 to stochastic_depth_prob
# since we have N blocks with L layers, we will have N * L probabilities uniformly distributed
# over the range [0, stochastic_depth_prob]
p_stochastic = np.linspace(0, stochastic_depth_prob, sum(block_layers)).tolist()
p_idx = 0
for in_channel, out_channel, num_layers in zip(in_channels, out_channels, block_layers):
self.blocks.append(
MaxVitBlock(
in_channels=in_channel,
out_channels=out_channel,
squeeze_ratio=squeeze_ratio,
expansion_ratio=expansion_ratio,
norm_layer=norm_layer,
activation_layer=activation_layer,
head_dim=head_dim,
mlp_ratio=mlp_ratio,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
partition_size=partition_size,
input_grid_size=input_size,
n_layers=num_layers,
p_stochastic=p_stochastic[p_idx : p_idx + num_layers],
),
)
input_size = self.blocks[-1].grid_size # type: ignore[assignment]
p_idx += num_layers
# see https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1137-L1158
# for why there is Linear -> Tanh -> Linear
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.LayerNorm(block_channels[-1]),
nn.Linear(block_channels[-1], block_channels[-1]),
nn.Tanh(),
nn.Linear(block_channels[-1], num_classes, bias=False),
)
self._init_weights()
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
for block in self.blocks:
x = block(x)
x = self.classifier(x)
return x
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def _maxvit(
# stem parameters
stem_channels: int,
# block parameters
block_channels: List[int],
block_layers: List[int],
stochastic_depth_prob: float,
# partitioning parameters
partition_size: int,
# transformer parameters
head_dim: int,
# Weights API
weights: Optional[WeightsEnum] = None,
progress: bool = False,
# kwargs,
**kwargs: Any,
) -> MaxVit:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
_ovewrite_named_param(kwargs, "input_size", weights.meta["min_size"])
input_size = kwargs.pop("input_size", (224, 224))
model = MaxVit(
stem_channels=stem_channels,
block_channels=block_channels,
block_layers=block_layers,
stochastic_depth_prob=stochastic_depth_prob,
head_dim=head_dim,
partition_size=partition_size,
input_size=input_size,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
class MaxVit_T_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/maxvit_t-bc5ab103.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
"categories": _IMAGENET_CATEGORIES,
"num_params": 30919624,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#maxvit",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.700,
"acc@5": 96.722,
}
},
"_ops": 5.558,
"_file_size": 118.769,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.
They were trained with a BatchNorm2D momentum of 0.99 instead of the more correct 0.01.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", MaxVit_T_Weights.IMAGENET1K_V1))
def maxvit_t(*, weights: Optional[MaxVit_T_Weights] = None, progress: bool = True, **kwargs: Any) -> MaxVit:
"""
Constructs a maxvit_t architecture from
`MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_.
Args:
weights (:class:`~torchvision.models.MaxVit_T_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MaxVit_T_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.maxvit.MaxVit``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/maxvit.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MaxVit_T_Weights
:members:
"""
weights = MaxVit_T_Weights.verify(weights)
return _maxvit(
stem_channels=64,
block_channels=[64, 128, 256, 512],
block_layers=[2, 2, 5, 2],
head_dim=32,
stochastic_depth_prob=0.2,
partition_size=7,
weights=weights,
progress=progress,
**kwargs,
)
```
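A minimal usage sketch of the ``maxvit_t`` builder above (assuming a standard torchvision install). With the default configuration the input must be 224 x 224 so that every block's feature map stays divisible by the partition size of 7, as enforced in ``MaxVit.__init__``.

```py
import torch
from torchvision.models import maxvit_t, MaxVit_T_Weights

model = maxvit_t(weights=MaxVit_T_Weights.IMAGENET1K_V1)
model.eval()

x = torch.rand(1, 3, 224, 224)  # 224 -> 112 -> 56 -> 28 -> 14 -> 7, all divisible by 7
with torch.no_grad():
    logits = model(x)  # shape (1, 1000)
```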
|
=====================================================================================================================
SOURCE CODE FILE: mnasnet.py
LINES: 1
SIZE: 17.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\mnasnet.py
ENCODING: utf-8
```py
import warnings
from functools import partial
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"MNASNet",
"MNASNet0_5_Weights",
"MNASNet0_75_Weights",
"MNASNet1_0_Weights",
"MNASNet1_3_Weights",
"mnasnet0_5",
"mnasnet0_75",
"mnasnet1_0",
"mnasnet1_3",
]
# Paper suggests 0.9997 momentum for TensorFlow. The equivalent PyTorch momentum is
# 1.0 - 0.9997, since PyTorch and TensorFlow define BatchNorm momentum in opposite ways.
_BN_MOMENTUM = 1 - 0.9997
class _InvertedResidual(nn.Module):
def __init__(
self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
) -> None:
super().__init__()
if stride not in [1, 2]:
raise ValueError(f"stride should be 1 or 2 instead of {stride}")
if kernel_size not in [3, 5]:
raise ValueError(f"kernel_size should be 3 or 5 instead of {kernel_size}")
mid_ch = in_ch * expansion_factor
self.apply_residual = in_ch == out_ch and stride == 1
self.layers = nn.Sequential(
# Pointwise
nn.Conv2d(in_ch, mid_ch, 1, bias=False),
nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
nn.ReLU(inplace=True),
# Depthwise
nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, stride=stride, groups=mid_ch, bias=False),
nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
nn.ReLU(inplace=True),
# Linear pointwise. Note that there's no activation.
nn.Conv2d(mid_ch, out_ch, 1, bias=False),
nn.BatchNorm2d(out_ch, momentum=bn_momentum),
)
def forward(self, input: Tensor) -> Tensor:
if self.apply_residual:
return self.layers(input) + input
else:
return self.layers(input)
def _stack(
in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float
) -> nn.Sequential:
"""Creates a stack of inverted residuals."""
if repeats < 1:
raise ValueError(f"repeats should be >= 1, instead got {repeats}")
# First one has no skip, because feature map size changes.
first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum)
remaining = []
for _ in range(1, repeats):
remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum))
return nn.Sequential(first, *remaining)
def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
"""Asymmetric rounding to make `val` divisible by `divisor`. With default
bias, will round up, unless the number is no more than 10% greater than the
smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88."""
if not 0.0 < round_up_bias < 1.0:
raise ValueError(f"round_up_bias should be greater than 0.0 and smaller than 1.0 instead of {round_up_bias}")
new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
return new_val if new_val >= round_up_bias * val else new_val + divisor
def _get_depths(alpha: float) -> List[int]:
"""Scales tensor depths as in reference MobileNet code, prefers rounding up
rather than down."""
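    # e.g. _get_depths(0.5) -> [16, 8, 16, 24, 40, 48, 96, 160]: each stage width is
    # scaled by alpha and then rounded to a multiple of 8 by _round_to_multiple_of.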
depths = [32, 16, 24, 40, 80, 96, 192, 320]
return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
class MNASNet(torch.nn.Module):
"""MNASNet, as described in https://arxiv.org/abs/1807.11626. This
implements the B1 variant of the model.
>>> model = MNASNet(1.0, num_classes=1000)
>>> x = torch.rand(1, 3, 224, 224)
>>> y = model(x)
>>> y.dim()
2
>>> y.nelement()
1000
"""
# Version 2 adds depth scaling in the initial stages of the network.
_version = 2
def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
super().__init__()
_log_api_usage_once(self)
if alpha <= 0.0:
raise ValueError(f"alpha should be greater than 0.0 instead of {alpha}")
self.alpha = alpha
self.num_classes = num_classes
depths = _get_depths(alpha)
layers = [
# First layer: regular conv.
nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
# Depthwise separable, no skip.
nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, groups=depths[0], bias=False),
nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
# MNASNet blocks: stacks of inverted residuals.
_stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
_stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
_stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
_stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
# Final mapping to classifier input.
nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
]
self.layers = nn.Sequential(*layers)
self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), nn.Linear(1280, num_classes))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, mode="fan_out", nonlinearity="sigmoid")
nn.init.zeros_(m.bias)
def forward(self, x: Tensor) -> Tensor:
x = self.layers(x)
# Equivalent to global avgpool and removing H and W dimensions.
x = x.mean([2, 3])
return self.classifier(x)
def _load_from_state_dict(
self,
state_dict: Dict,
prefix: str,
local_metadata: Dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
version = local_metadata.get("version", None)
if version not in [1, 2]:
raise ValueError(f"version shluld be set to 1 or 2 instead of {version}")
if version == 1 and not self.alpha == 1.0:
# In the initial version of the model (v1), stem was fixed-size.
# All other layer configurations were the same. This will patch
# the model so that it's identical to v1. Model with alpha 1.0 is
# unaffected.
depths = _get_depths(self.alpha)
v1_stem = [
nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
_stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
]
for idx, layer in enumerate(v1_stem):
self.layers[idx] = layer
# The model is now identical to v1, and must be saved as such.
self._version = 1
warnings.warn(
"A new version of MNASNet model has been implemented. "
"Your checkpoint was saved using the previous version. "
"This checkpoint will load and work as before, but "
"you may want to upgrade by training a newer model or "
"transfer learning from an updated ImageNet checkpoint.",
UserWarning,
)
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/1e100/mnasnet_trainer",
}
class MNASNet0_5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2218512,
"_metrics": {
"ImageNet-1K": {
"acc@1": 67.734,
"acc@5": 87.490,
}
},
"_ops": 0.104,
"_file_size": 8.591,
"_docs": """These weights reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class MNASNet0_75_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mnasnet0_75-7090bc5f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/6019",
"num_params": 3170208,
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.180,
"acc@5": 90.496,
}
},
"_ops": 0.215,
"_file_size": 12.303,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class MNASNet1_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 4383312,
"_metrics": {
"ImageNet-1K": {
"acc@1": 73.456,
"acc@5": 91.510,
}
},
"_ops": 0.314,
"_file_size": 16.915,
"_docs": """These weights reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class MNASNet1_3_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mnasnet1_3-a4c69d6f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/6019",
"num_params": 6282256,
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.506,
"acc@5": 93.522,
}
},
"_ops": 0.526,
"_file_size": 24.246,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
def _mnasnet(alpha: float, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MNASNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MNASNet(alpha, **kwargs)
if weights:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_5_Weights.IMAGENET1K_V1))
def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
"""MNASNet with depth multiplier of 0.5 from
`MnasNet: Platform-Aware Neural Architecture Search for Mobile
<https://arxiv.org/abs/1807.11626>`_ paper.
Args:
weights (:class:`~torchvision.models.MNASNet0_5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MNASNet0_5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MNASNet0_5_Weights
:members:
"""
weights = MNASNet0_5_Weights.verify(weights)
return _mnasnet(0.5, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_75_Weights.IMAGENET1K_V1))
def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
"""MNASNet with depth multiplier of 0.75 from
`MnasNet: Platform-Aware Neural Architecture Search for Mobile
<https://arxiv.org/abs/1807.11626>`_ paper.
Args:
weights (:class:`~torchvision.models.MNASNet0_75_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MNASNet0_75_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MNASNet0_75_Weights
:members:
"""
weights = MNASNet0_75_Weights.verify(weights)
return _mnasnet(0.75, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_0_Weights.IMAGENET1K_V1))
def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
"""MNASNet with depth multiplier of 1.0 from
`MnasNet: Platform-Aware Neural Architecture Search for Mobile
<https://arxiv.org/abs/1807.11626>`_ paper.
Args:
weights (:class:`~torchvision.models.MNASNet1_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MNASNet1_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MNASNet1_0_Weights
:members:
"""
weights = MNASNet1_0_Weights.verify(weights)
return _mnasnet(1.0, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_3_Weights.IMAGENET1K_V1))
def mnasnet1_3(*, weights: Optional[MNASNet1_3_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
"""MNASNet with depth multiplier of 1.3 from
`MnasNet: Platform-Aware Neural Architecture Search for Mobile
<https://arxiv.org/abs/1807.11626>`_ paper.
Args:
weights (:class:`~torchvision.models.MNASNet1_3_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MNASNet1_3_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MNASNet1_3_Weights
:members:
"""
weights = MNASNet1_3_Weights.verify(weights)
return _mnasnet(1.3, weights, progress, **kwargs)
```
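A minimal usage sketch of the MNASNet builders above (assuming a standard torchvision install); the depth multiplier passed to each builder scales the per-stage channel widths through ``_get_depths``.

```py
import torch
from torchvision.models import mnasnet0_5, mnasnet1_0

small = mnasnet0_5(weights=None)  # alpha = 0.5
base = mnasnet1_0(weights=None)   # alpha = 1.0

x = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    print(small(x).shape, base(x).shape)  # torch.Size([1, 1000]) for both
```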
|
=======================================================================================================================
SOURCE CODE FILE: mobilenet.py
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\mobilenet.py
ENCODING: utf-8
```py
from .mobilenetv2 import * # noqa: F401, F403
from .mobilenetv3 import * # noqa: F401, F403
from .mobilenetv2 import __all__ as mv2_all
from .mobilenetv3 import __all__ as mv3_all
__all__ = mv2_all + mv3_all
```
|
=========================================================================================================================
SOURCE CODE FILE: mobilenetv2.py
LINES: 1
SIZE: 9.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\mobilenetv2.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional
import torch
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]
# necessary for backwards compatibility
class InvertedResidual(nn.Module):
def __init__(
self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
self.stride = stride
if stride not in [1, 2]:
raise ValueError(f"stride should be 1 or 2 instead of {stride}")
if norm_layer is None:
norm_layer = nn.BatchNorm2d
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers: List[nn.Module] = []
if expand_ratio != 1:
# pw
layers.append(
Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
)
layers.extend(
[
# dw
Conv2dNormActivation(
hidden_dim,
hidden_dim,
stride=stride,
groups=hidden_dim,
norm_layer=norm_layer,
activation_layer=nn.ReLU6,
),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
norm_layer(oup),
]
)
self.conv = nn.Sequential(*layers)
self.out_channels = oup
self._is_cn = stride > 1
def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(
self,
num_classes: int = 1000,
width_mult: float = 1.0,
inverted_residual_setting: Optional[List[List[int]]] = None,
round_nearest: int = 8,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
dropout: float = 0.2,
) -> None:
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
block: Module specifying inverted residual building block for mobilenet
norm_layer: Module specifying the normalization layer to use
            dropout (float): The dropout probability
"""
super().__init__()
_log_api_usage_once(self)
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = nn.BatchNorm2d
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError(
f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
)
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features: List[nn.Module] = [
Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
input_channel = output_channel
# building last several layers
features.append(
Conv2dNormActivation(
input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
)
)
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
# This exists since TorchScript doesn't support inheritance, so the superclass method
# (this one) needs to have a name other than `forward` that can be accessed in a subclass
x = self.features(x)
# Cannot use "squeeze" as batch-size can be 1
x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
_COMMON_META = {
"num_params": 3504872,
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
}
class MobileNet_V2_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.878,
"acc@5": 90.286,
}
},
"_ops": 0.301,
"_file_size": 13.555,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.154,
"acc@5": 90.822,
}
},
"_ops": 0.301,
"_file_size": 13.598,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
def mobilenet_v2(
*, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV2:
"""MobileNetV2 architecture from the `MobileNetV2: Inverted Residuals and Linear
Bottlenecks <https://arxiv.org/abs/1801.04381>`_ paper.
Args:
weights (:class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MobileNet_V2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mobilenetv2.MobileNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MobileNet_V2_Weights
:members:
"""
weights = MobileNet_V2_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MobileNetV2(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
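
A minimal inference sketch against the `mobilenet_v2` builder and `MobileNet_V2_Weights` enum shown above; the image path `dog.jpg` is a hypothetical placeholder, and the preprocessing comes from the preset bundled with the weights (resize to 232, center-crop to 224 for `IMAGENET1K_V2`).

```py
import torch
from torchvision.io import read_image
from torchvision.models import mobilenet_v2, MobileNet_V2_Weights

weights = MobileNet_V2_Weights.IMAGENET1K_V2            # or MobileNet_V2_Weights.DEFAULT
model = mobilenet_v2(weights=weights).eval()

preprocess = weights.transforms()                        # preset transform stored in the weights entry
img = read_image("dog.jpg")                              # hypothetical local image path
batch = preprocess(img).unsqueeze(0)

with torch.no_grad():
    logits = model(batch)
label = weights.meta["categories"][logits.argmax(dim=1).item()]
print(label)
```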
|
=========================================================================================================================
SOURCE CODE FILE: mobilenetv3.py
LINES: 1
SIZE: 16.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\mobilenetv3.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional, Sequence
import torch
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = [
"MobileNetV3",
"MobileNet_V3_Large_Weights",
"MobileNet_V3_Small_Weights",
"mobilenet_v3_large",
"mobilenet_v3_small",
]
class InvertedResidualConfig:
    # Stores information listed in Tables 1 and 2 of the MobileNetV3 paper
def __init__(
self,
input_channels: int,
kernel: int,
expanded_channels: int,
out_channels: int,
use_se: bool,
activation: str,
stride: int,
dilation: int,
width_mult: float,
):
self.input_channels = self.adjust_channels(input_channels, width_mult)
self.kernel = kernel
self.expanded_channels = self.adjust_channels(expanded_channels, width_mult)
self.out_channels = self.adjust_channels(out_channels, width_mult)
self.use_se = use_se
self.use_hs = activation == "HS"
self.stride = stride
self.dilation = dilation
@staticmethod
def adjust_channels(channels: int, width_mult: float):
return _make_divisible(channels * width_mult, 8)
class InvertedResidual(nn.Module):
    # Implemented as described in section 5 of the MobileNetV3 paper
def __init__(
self,
cnf: InvertedResidualConfig,
norm_layer: Callable[..., nn.Module],
se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid),
):
super().__init__()
if not (1 <= cnf.stride <= 2):
raise ValueError("illegal stride value")
self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels
layers: List[nn.Module] = []
activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU
# expand
if cnf.expanded_channels != cnf.input_channels:
layers.append(
Conv2dNormActivation(
cnf.input_channels,
cnf.expanded_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
# depthwise
stride = 1 if cnf.dilation > 1 else cnf.stride
layers.append(
Conv2dNormActivation(
cnf.expanded_channels,
cnf.expanded_channels,
kernel_size=cnf.kernel,
stride=stride,
dilation=cnf.dilation,
groups=cnf.expanded_channels,
norm_layer=norm_layer,
activation_layer=activation_layer,
)
)
if cnf.use_se:
squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
layers.append(se_layer(cnf.expanded_channels, squeeze_channels))
# project
layers.append(
Conv2dNormActivation(
cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
)
)
self.block = nn.Sequential(*layers)
self.out_channels = cnf.out_channels
self._is_cn = cnf.stride > 1
def forward(self, input: Tensor) -> Tensor:
result = self.block(input)
if self.use_res_connect:
result += input
return result
class MobileNetV3(nn.Module):
def __init__(
self,
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
num_classes: int = 1000,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
dropout: float = 0.2,
**kwargs: Any,
) -> None:
"""
MobileNet V3 main class
Args:
inverted_residual_setting (List[InvertedResidualConfig]): Network structure
last_channel (int): The number of channels on the penultimate layer
num_classes (int): Number of classes
block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The dropout probability
"""
super().__init__()
_log_api_usage_once(self)
if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
elif not (
isinstance(inverted_residual_setting, Sequence)
and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
):
raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)
layers: List[nn.Module] = []
# building first layer
firstconv_output_channels = inverted_residual_setting[0].input_channels
layers.append(
Conv2dNormActivation(
3,
firstconv_output_channels,
kernel_size=3,
stride=2,
norm_layer=norm_layer,
activation_layer=nn.Hardswish,
)
)
# building inverted residual blocks
for cnf in inverted_residual_setting:
layers.append(block(cnf, norm_layer))
# building last several layers
lastconv_input_channels = inverted_residual_setting[-1].out_channels
lastconv_output_channels = 6 * lastconv_input_channels
layers.append(
Conv2dNormActivation(
lastconv_input_channels,
lastconv_output_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=nn.Hardswish,
)
)
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Sequential(
nn.Linear(lastconv_output_channels, last_channel),
nn.Hardswish(inplace=True),
nn.Dropout(p=dropout, inplace=True),
nn.Linear(last_channel, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _mobilenet_v3_conf(
arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, **kwargs: Any
):
reduce_divider = 2 if reduced_tail else 1
dilation = 2 if dilated else 1
bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)
if arch == "mobilenet_v3_large":
inverted_residual_setting = [
bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
bneck_conf(16, 3, 64, 24, False, "RE", 2, 1), # C1
bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
bneck_conf(24, 5, 72, 40, True, "RE", 2, 1), # C2
bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
bneck_conf(40, 3, 240, 80, False, "HS", 2, 1), # C3
bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation), # C4
bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
]
last_channel = adjust_channels(1280 // reduce_divider) # C5
elif arch == "mobilenet_v3_small":
inverted_residual_setting = [
bneck_conf(16, 3, 16, 16, True, "RE", 2, 1), # C1
bneck_conf(16, 3, 72, 24, False, "RE", 2, 1), # C2
bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
bneck_conf(24, 5, 96, 40, True, "HS", 2, 1), # C3
bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation), # C4
bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
]
last_channel = adjust_channels(1024 // reduce_divider) # C5
else:
raise ValueError(f"Unsupported model type {arch}")
return inverted_residual_setting, last_channel
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
}
class MobileNet_V3_Large_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.042,
"acc@5": 91.340,
}
},
"_ops": 0.217,
"_file_size": 21.114,
"_docs": """These weights were trained from scratch by using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.274,
"acc@5": 92.566,
}
},
"_ops": 0.217,
"_file_size": 21.107,
"_docs": """
These weights improve marginally upon the results of the original paper by using a modified version of
TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class MobileNet_V3_Small_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2542856,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"_metrics": {
"ImageNet-1K": {
"acc@1": 67.668,
"acc@5": 87.402,
}
},
"_ops": 0.057,
"_file_size": 9.829,
"_docs": """
These weights improve upon the results of the original paper by using a simple training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.IMAGENET1K_V1))
def mobilenet_v3_large(
*, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
"""
Constructs a large MobileNetV3 architecture from
`Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.
Args:
weights (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MobileNet_V3_Large_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
:members:
"""
weights = MobileNet_V3_Large_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.IMAGENET1K_V1))
def mobilenet_v3_small(
*, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
"""
Constructs a small MobileNetV3 architecture from
`Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.
Args:
weights (:class:`~torchvision.models.MobileNet_V3_Small_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MobileNet_V3_Small_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MobileNet_V3_Small_Weights
:members:
"""
weights = MobileNet_V3_Small_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
```
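
A short usage sketch for the `mobilenet_v3_small` builder above. The random tensor stands in for a real image; the weights' preset transform handles resizing, cropping, and normalization, and `weights.meta["categories"]` maps class indices to ImageNet labels.

```py
import torch
from torchvision.models import mobilenet_v3_small, MobileNet_V3_Small_Weights

weights = MobileNet_V3_Small_Weights.IMAGENET1K_V1
model = mobilenet_v3_small(weights=weights).eval()

# Random stand-in for an image tensor in [0, 1]; real code would load an actual image.
batch = weights.transforms()(torch.rand(3, 256, 256)).unsqueeze(0)

with torch.no_grad():
    scores = model(batch).softmax(dim=1)
top5 = scores.topk(5).indices[0].tolist()
print([weights.meta["categories"][i] for i in top5])
```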
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\optical_flow\__init__.py
ENCODING: utf-8
```py
from .raft import *
```
|
=================================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 2.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\optical_flow\_utils.py
ENCODING: utf-8
```py
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
def grid_sample(img: Tensor, absolute_grid: Tensor, mode: str = "bilinear", align_corners: Optional[bool] = None):
"""Same as torch's grid_sample, with absolute pixel coordinates instead of normalized coordinates."""
h, w = img.shape[-2:]
xgrid, ygrid = absolute_grid.split([1, 1], dim=-1)
xgrid = 2 * xgrid / (w - 1) - 1
    # Only normalize ygrid when h > 1, so that this function can be reused in raft-stereo
if h > 1:
ygrid = 2 * ygrid / (h - 1) - 1
normalized_grid = torch.cat([xgrid, ygrid], dim=-1)
return F.grid_sample(img, normalized_grid, mode=mode, align_corners=align_corners)
def make_coords_grid(batch_size: int, h: int, w: int, device: str = "cpu"):
device = torch.device(device)
coords = torch.meshgrid(torch.arange(h, device=device), torch.arange(w, device=device), indexing="ij")
coords = torch.stack(coords[::-1], dim=0).float()
return coords[None].repeat(batch_size, 1, 1, 1)
def upsample_flow(flow, up_mask: Optional[Tensor] = None, factor: int = 8):
"""Upsample flow by the input factor (default 8).
If up_mask is None we just interpolate.
If up_mask is specified, we upsample using a convex combination of its weights. See paper page 8 and appendix B.
Note that in appendix B the picture assumes a downsample factor of 4 instead of 8.
"""
batch_size, num_channels, h, w = flow.shape
new_h, new_w = h * factor, w * factor
if up_mask is None:
return factor * F.interpolate(flow, size=(new_h, new_w), mode="bilinear", align_corners=True)
up_mask = up_mask.view(batch_size, 1, 9, factor, factor, h, w)
up_mask = torch.softmax(up_mask, dim=2) # "convex" == weights sum to 1
upsampled_flow = F.unfold(factor * flow, kernel_size=3, padding=1).view(batch_size, num_channels, 9, 1, 1, h, w)
upsampled_flow = torch.sum(up_mask * upsampled_flow, dim=2)
return upsampled_flow.permute(0, 1, 4, 2, 5, 3).reshape(batch_size, num_channels, new_h, new_w)
```
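
A small sanity check of the conventions in the helpers above, assuming the private module path shown in this file's header: `grid_sample` takes absolute pixel coordinates, so sampling an image at its own coordinate grid reproduces it, and `upsample_flow` without a mask is plain bilinear interpolation with the flow values scaled by the factor.

```py
import torch
from torchvision.models.optical_flow._utils import grid_sample, make_coords_grid, upsample_flow

img = torch.rand(2, 3, 16, 20)
coords = make_coords_grid(batch_size=2, h=16, w=20)       # shape (2, 2, 16, 20), channels are (x, y)

# grid_sample expects the grid as (N, H_out, W_out, 2) in absolute pixel coordinates.
resampled = grid_sample(img, coords.permute(0, 2, 3, 1), align_corners=True)
print(torch.allclose(resampled, img, atol=1e-6))           # identity: each pixel sampled at its own location

flow = torch.rand(2, 2, 16, 20)
up = upsample_flow(flow)                                   # no mask: bilinear upsample, values scaled by 8
print(up.shape)                                            # torch.Size([2, 2, 128, 160])
```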
|
===============================================================================================================================
SOURCE CODE FILE: raft.py
LINES: 1
SIZE: 39.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\optical_flow\raft.py
ENCODING: utf-8
```py
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.instancenorm import InstanceNorm2d
from torchvision.ops import Conv2dNormActivation
from ...transforms._presets import OpticalFlow
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._utils import handle_legacy_interface
from ._utils import grid_sample, make_coords_grid, upsample_flow
__all__ = (
"RAFT",
"raft_large",
"raft_small",
"Raft_Large_Weights",
"Raft_Small_Weights",
)
class ResidualBlock(nn.Module):
"""Slightly modified Residual block with extra relu and biases."""
def __init__(self, in_channels, out_channels, *, norm_layer, stride=1, always_project: bool = False):
super().__init__()
# Note regarding bias=True:
# Usually we can pass bias=False in conv layers followed by a norm layer.
# But in the RAFT training reference, the BatchNorm2d layers are only activated for the first dataset,
# and frozen for the rest of the training process (i.e. set as eval()). The bias term is thus still useful
# for the rest of the datasets. Technically, we could remove the bias for other norm layers like Instance norm
# because these aren't frozen, but we don't bother (also, we wouldn't be able to load the original weights).
self.convnormrelu1 = Conv2dNormActivation(
in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
)
self.convnormrelu2 = Conv2dNormActivation(
out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
)
# make mypy happy
self.downsample: nn.Module
if stride == 1 and not always_project:
self.downsample = nn.Identity()
else:
self.downsample = Conv2dNormActivation(
in_channels,
out_channels,
norm_layer=norm_layer,
kernel_size=1,
stride=stride,
bias=True,
activation_layer=None,
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
y = x
y = self.convnormrelu1(y)
y = self.convnormrelu2(y)
x = self.downsample(x)
return self.relu(x + y)
class BottleneckBlock(nn.Module):
"""Slightly modified BottleNeck block (extra relu and biases)"""
def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
super().__init__()
# See note in ResidualBlock for the reason behind bias=True
self.convnormrelu1 = Conv2dNormActivation(
in_channels, out_channels // 4, norm_layer=norm_layer, kernel_size=1, bias=True
)
self.convnormrelu2 = Conv2dNormActivation(
out_channels // 4, out_channels // 4, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
)
self.convnormrelu3 = Conv2dNormActivation(
out_channels // 4, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True
)
self.relu = nn.ReLU(inplace=True)
if stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = Conv2dNormActivation(
in_channels,
out_channels,
norm_layer=norm_layer,
kernel_size=1,
stride=stride,
bias=True,
activation_layer=None,
)
def forward(self, x):
y = x
y = self.convnormrelu1(y)
y = self.convnormrelu2(y)
y = self.convnormrelu3(y)
x = self.downsample(x)
return self.relu(x + y)
class FeatureEncoder(nn.Module):
"""The feature encoder, used both as the actual feature encoder, and as the context encoder.
It must downsample its input by 8.
"""
def __init__(
self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), strides=(2, 1, 2, 2), norm_layer=nn.BatchNorm2d
):
super().__init__()
if len(layers) != 5:
raise ValueError(f"The expected number of layers is 5, instead got {len(layers)}")
# See note in ResidualBlock for the reason behind bias=True
self.convnormrelu = Conv2dNormActivation(
3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=strides[0], bias=True
)
self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=strides[1])
self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=strides[2])
self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=strides[3])
self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
num_downsamples = len(list(filter(lambda s: s == 2, strides)))
self.output_dim = layers[-1]
self.downsample_factor = 2**num_downsamples
def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride):
block1 = block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride)
block2 = block(out_channels, out_channels, norm_layer=norm_layer, stride=1)
return nn.Sequential(block1, block2)
def forward(self, x):
x = self.convnormrelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv(x)
return x
class MotionEncoder(nn.Module):
"""The motion encoder, part of the update block.
Takes the current predicted flow and the correlation features as input and returns an encoded version of these.
"""
def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128):
super().__init__()
if len(flow_layers) != 2:
raise ValueError(f"The expected number of flow_layers is 2, instead got {len(flow_layers)}")
if len(corr_layers) not in (1, 2):
raise ValueError(f"The number of corr_layers should be 1 or 2, instead got {len(corr_layers)}")
self.convcorr1 = Conv2dNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
if len(corr_layers) == 2:
self.convcorr2 = Conv2dNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
else:
self.convcorr2 = nn.Identity()
self.convflow1 = Conv2dNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
self.convflow2 = Conv2dNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)
# out_channels - 2 because we cat the flow (2 channels) at the end
self.conv = Conv2dNormActivation(
corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
)
self.out_channels = out_channels
def forward(self, flow, corr_features):
corr = self.convcorr1(corr_features)
corr = self.convcorr2(corr)
flow_orig = flow
flow = self.convflow1(flow)
flow = self.convflow2(flow)
corr_flow = torch.cat([corr, flow], dim=1)
corr_flow = self.conv(corr_flow)
return torch.cat([corr_flow, flow_orig], dim=1)
class ConvGRU(nn.Module):
"""Convolutional Gru unit."""
def __init__(self, *, input_size, hidden_size, kernel_size, padding):
super().__init__()
self.convz = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
self.convr = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
self.convq = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
def forward(self, h, x):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
return h
def _pass_through_h(h, _):
# Declared here for torchscript
return h
class RecurrentBlock(nn.Module):
"""Recurrent block, part of the update block.
Takes the current hidden state and the concatenation of (motion encoder output, context) as input.
Returns an updated hidden state.
"""
def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))):
super().__init__()
if len(kernel_size) != len(padding):
raise ValueError(
f"kernel_size should have the same length as padding, instead got len(kernel_size) = {len(kernel_size)} and len(padding) = {len(padding)}"
)
if len(kernel_size) not in (1, 2):
raise ValueError(f"kernel_size should either 1 or 2, instead got {len(kernel_size)}")
self.convgru1 = ConvGRU(
input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0]
)
if len(kernel_size) == 2:
self.convgru2 = ConvGRU(
input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1]
)
else:
self.convgru2 = _pass_through_h
self.hidden_size = hidden_size
def forward(self, h, x):
h = self.convgru1(h, x)
h = self.convgru2(h, x)
return h
class FlowHead(nn.Module):
"""Flow head, part of the update block.
Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow".
"""
def __init__(self, *, in_channels, hidden_size):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class UpdateBlock(nn.Module):
"""The update block which contains the motion encoder, the recurrent block, and the flow head.
It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block.
"""
def __init__(self, *, motion_encoder, recurrent_block, flow_head):
super().__init__()
self.motion_encoder = motion_encoder
self.recurrent_block = recurrent_block
self.flow_head = flow_head
self.hidden_state_size = recurrent_block.hidden_size
def forward(self, hidden_state, context, corr_features, flow):
motion_features = self.motion_encoder(flow, corr_features)
x = torch.cat([context, motion_features], dim=1)
hidden_state = self.recurrent_block(hidden_state, x)
delta_flow = self.flow_head(hidden_state)
return hidden_state, delta_flow
class MaskPredictor(nn.Module):
"""Mask predictor to be used when upsampling the predicted flow.
It takes the hidden state of the recurrent unit as input and outputs the mask.
This is not used in the raft-small model.
"""
def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
super().__init__()
self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
# 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder,
# and we interpolate with all 9 surrounding neighbors. See paper and appendix B.
self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)
# In the original code, they use a factor of 0.25 to "downweight the gradients" of that branch.
# See e.g. https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419
# or https://github.com/princeton-vl/RAFT/issues/24.
# It doesn't seem to affect epe significantly and can likely be set to 1.
self.multiplier = multiplier
def forward(self, x):
x = self.convrelu(x)
x = self.conv(x)
return self.multiplier * x
class CorrBlock(nn.Module):
"""The correlation block.
Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder,
and then indexes from this pyramid to create correlation features.
The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that
are within a ``radius``, according to the infinity norm (see paper section 3.2).
Note: typo in the paper, it should be infinity norm, not 1-norm.
"""
def __init__(self, *, num_levels: int = 4, radius: int = 4):
super().__init__()
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid: List[Tensor] = [torch.tensor(0)] # useless, but torchscript is otherwise confused :')
# The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius}
# so it's a square surrounding x', and its sides have a length of 2 * radius + 1
# The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo:
# https://github.com/princeton-vl/RAFT/issues/122
self.out_channels = num_levels * (2 * radius + 1) ** 2
def build_pyramid(self, fmap1, fmap2):
"""Build the correlation pyramid from two feature maps.
The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2)
The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
to build the correlation pyramid.
"""
if fmap1.shape != fmap2.shape:
raise ValueError(
f"Input feature maps should have the same shape, instead got {fmap1.shape} (fmap1.shape) != {fmap2.shape} (fmap2.shape)"
)
# Explaining min_fmap_size below: the fmaps are down-sampled (num_levels - 1) times by a factor of 2.
        # The last corr_volume must have at least 2 values (hence the 2* factor), otherwise grid_sample() would
# produce nans in its output.
min_fmap_size = 2 * (2 ** (self.num_levels - 1))
if any(fmap_size < min_fmap_size for fmap_size in fmap1.shape[-2:]):
raise ValueError(
"Feature maps are too small to be down-sampled by the correlation pyramid. "
f"H and W of feature maps should be at least {min_fmap_size}; got: {fmap1.shape[-2:]}. "
"Remember that input images to the model are downsampled by 8, so that means their "
f"dimensions should be at least 8 * {min_fmap_size} = {8 * min_fmap_size}."
)
corr_volume = self._compute_corr_volume(fmap1, fmap2)
batch_size, h, w, num_channels, _, _ = corr_volume.shape # _, _ = h, w
corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w)
self.corr_pyramid = [corr_volume]
for _ in range(self.num_levels - 1):
corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2)
self.corr_pyramid.append(corr_volume)
def index_pyramid(self, centroids_coords):
"""Return correlation features by indexing from the pyramid."""
neighborhood_side_len = 2 * self.radius + 1 # see note in __init__ about out_channels
di = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device)
delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2)
batch_size, _, h, w = centroids_coords.shape # _ = 2
centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2)
indexed_pyramid = []
for corr_volume in self.corr_pyramid:
sampling_coords = centroids_coords + delta # end shape is (batch_size * h * w, side_len, side_len, 2)
indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
batch_size, h, w, -1
)
indexed_pyramid.append(indexed_corr_volume)
centroids_coords = centroids_coords / 2
corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()
expected_output_shape = (batch_size, self.out_channels, h, w)
if corr_features.shape != expected_output_shape:
raise ValueError(
f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}"
)
return corr_features
def _compute_corr_volume(self, fmap1, fmap2):
batch_size, num_channels, h, w = fmap1.shape
fmap1 = fmap1.view(batch_size, num_channels, h * w)
fmap2 = fmap2.view(batch_size, num_channels, h * w)
corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
corr = corr.view(batch_size, h, w, 1, h, w)
return corr / torch.sqrt(torch.tensor(num_channels))
class RAFT(nn.Module):
def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None):
"""RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
        Args:
feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8.
Its input is the concatenation of ``image1`` and ``image2``.
context_encoder (nn.Module): The context encoder. It must downsample the input by 8.
Its input is ``image1``. As in the original implementation, its output will be split into 2 parts:
- one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
- one part will be used to initialize the hidden state of the recurrent unit of
the ``update_block``
                These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the number of
                output channels of the ``context_encoder`` must be strictly greater than ``hidden_state_size``.
corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the
``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose
2 methods:
- a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the
output of the ``feature_encoder``).
- a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns
the correlation features. See paper section 3.2.
It must expose an ``out_channels`` attribute.
update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the
flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation
features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow``
prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute.
mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
The output channel must be 8 * 8 * 9 - see paper section 3.3, and Appendix B.
If ``None`` (default), the flow is upsampled using interpolation.
"""
super().__init__()
_log_api_usage_once(self)
self.feature_encoder = feature_encoder
self.context_encoder = context_encoder
self.corr_block = corr_block
self.update_block = update_block
self.mask_predictor = mask_predictor
if not hasattr(self.update_block, "hidden_state_size"):
raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")
def forward(self, image1, image2, num_flow_updates: int = 12):
batch_size, _, h, w = image1.shape
if (h, w) != image2.shape[-2:]:
raise ValueError(f"input images should have the same shape, instead got ({h}, {w}) != {image2.shape[-2:]}")
if not ((h % 8 == 0) and (w % 8 == 0)):
raise ValueError(f"input image H and W should be divisible by 8, instead got {h} (h) and {w} (w)")
fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0))
fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
if fmap1.shape[-2:] != (h // 8, w // 8):
raise ValueError("The feature encoder should downsample H and W by 8")
self.corr_block.build_pyramid(fmap1, fmap2)
context_out = self.context_encoder(image1)
if context_out.shape[-2:] != (h // 8, w // 8):
raise ValueError("The context encoder should downsample H and W by 8")
# As in the original paper, the actual output of the context encoder is split in 2 parts:
        # - one part is used to initialize the hidden state of the recurrent units of the update block
# - the rest is the "actual" context.
hidden_state_size = self.update_block.hidden_state_size
out_channels_context = context_out.shape[1] - hidden_state_size
if out_channels_context <= 0:
raise ValueError(
f"The context encoder outputs {context_out.shape[1]} channels, but it should have at strictly more than hidden_state={hidden_state_size} channels"
)
hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1)
hidden_state = torch.tanh(hidden_state)
context = F.relu(context)
coords0 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
coords1 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
flow_predictions = []
for _ in range(num_flow_updates):
coords1 = coords1.detach() # Don't backpropagate gradients through this branch, see paper
corr_features = self.corr_block.index_pyramid(centroids_coords=coords1)
flow = coords1 - coords0
hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow)
coords1 = coords1 + delta_flow
up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask)
flow_predictions.append(upsampled_flow)
return flow_predictions
_COMMON_META = {
"min_size": (128, 128),
}
class Raft_Large_Weights(WeightsEnum):
"""The metrics reported here are as follows.
``epe`` is the "end-point-error" and indicates how far (in pixels) the
predicted flow is from its true value. This is averaged over all pixels
of all images. ``per_image_epe`` is similar, but the average is different:
the epe is first computed on each image independently, and then averaged
over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
in the original paper, and it's only used on Kitti. ``fl-all`` is also a
Kitti-specific metric, defined by the author of the dataset and used for the
Kitti leaderboard. It corresponds to the average of pixels whose epe is
either <3px, or <5% of flow's 2-norm.
"""
C_T_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT
url="https://download.pytorch.org/models/raft_large_C_T_V1-22a6c225.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/princeton-vl/RAFT",
"_metrics": {
"Sintel-Train-Cleanpass": {"epe": 1.4411},
"Sintel-Train-Finalpass": {"epe": 2.7894},
"Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """These weights were ported from the original paper. They
are trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
},
)
C_T_V2 = Weights(
url="https://download.pytorch.org/models/raft_large_C_T_V2-1bb1363a.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
"_metrics": {
"Sintel-Train-Cleanpass": {"epe": 1.3822},
"Sintel-Train-Finalpass": {"epe": 2.7161},
"Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """These weights were trained from scratch on
:class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
},
)
C_T_SKHT_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT
url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V1-0b8c9e55.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/princeton-vl/RAFT",
"_metrics": {
"Sintel-Test-Cleanpass": {"epe": 1.94},
"Sintel-Test-Finalpass": {"epe": 3.18},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """
These weights were ported from the original paper. They are
trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D` and fine-tuned on
Sintel. The Sintel fine-tuning step is a combination of
:class:`~torchvision.datasets.Sintel`,
:class:`~torchvision.datasets.KittiFlow`,
:class:`~torchvision.datasets.HD1K`, and
:class:`~torchvision.datasets.FlyingThings3D` (clean pass).
""",
},
)
C_T_SKHT_V2 = Weights(
url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V2-ff5fadd5.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
"_metrics": {
"Sintel-Test-Cleanpass": {"epe": 1.819},
"Sintel-Test-Finalpass": {"epe": 3.067},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """
These weights were trained from scratch. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D` and then
fine-tuned on Sintel. The Sintel fine-tuning step is a
combination of :class:`~torchvision.datasets.Sintel`,
:class:`~torchvision.datasets.KittiFlow`,
:class:`~torchvision.datasets.HD1K`, and
:class:`~torchvision.datasets.FlyingThings3D` (clean pass).
""",
},
)
C_T_SKHT_K_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT
url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V1-4a6a5039.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/princeton-vl/RAFT",
"_metrics": {
"Kitti-Test": {"fl_all": 5.10},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """
These weights were ported from the original paper. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`,
fine-tuned on Sintel, and then fine-tuned on
:class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
step was described above.
""",
},
)
C_T_SKHT_K_V2 = Weights(
url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V2-b5c70766.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 5257536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
"_metrics": {
"Kitti-Test": {"fl_all": 5.19},
},
"_ops": 211.007,
"_file_size": 20.129,
"_docs": """
These weights were trained from scratch. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`,
fine-tuned on Sintel, and then fine-tuned on
:class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
step was described above.
""",
},
)
DEFAULT = C_T_SKHT_V2
class Raft_Small_Weights(WeightsEnum):
"""The metrics reported here are as follows.
``epe`` is the "end-point-error" and indicates how far (in pixels) the
predicted flow is from its true value. This is averaged over all pixels
of all images. ``per_image_epe`` is similar, but the average is different:
the epe is first computed on each image independently, and then averaged
over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
in the original paper, and it's only used on Kitti. ``fl-all`` is also a
Kitti-specific metric, defined by the author of the dataset and used for the
Kitti leaderboard. It corresponds to the average of pixels whose epe is
either <3px, or <5% of flow's 2-norm.
"""
C_T_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT
url="https://download.pytorch.org/models/raft_small_C_T_V1-ad48884c.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 990162,
"recipe": "https://github.com/princeton-vl/RAFT",
"_metrics": {
"Sintel-Train-Cleanpass": {"epe": 2.1231},
"Sintel-Train-Finalpass": {"epe": 3.2790},
"Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
},
"_ops": 47.655,
"_file_size": 3.821,
"_docs": """These weights were ported from the original paper. They
are trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
},
)
C_T_V2 = Weights(
url="https://download.pytorch.org/models/raft_small_C_T_V2-01064c6d.pth",
transforms=OpticalFlow,
meta={
**_COMMON_META,
"num_params": 990162,
"recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
"_metrics": {
"Sintel-Train-Cleanpass": {"epe": 1.9901},
"Sintel-Train-Finalpass": {"epe": 3.2831},
"Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
},
"_ops": 47.655,
"_file_size": 3.821,
"_docs": """These weights were trained from scratch on
:class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
},
)
DEFAULT = C_T_V2
def _raft(
*,
weights=None,
progress=False,
# Feature encoder
feature_encoder_layers,
feature_encoder_block,
feature_encoder_norm_layer,
# Context encoder
context_encoder_layers,
context_encoder_block,
context_encoder_norm_layer,
# Correlation block
corr_block_num_levels,
corr_block_radius,
# Motion encoder
motion_encoder_corr_layers,
motion_encoder_flow_layers,
motion_encoder_out_channels,
# Recurrent block
recurrent_block_hidden_state_size,
recurrent_block_kernel_size,
recurrent_block_padding,
# Flow Head
flow_head_hidden_size,
# Mask predictor
use_mask_predictor,
**kwargs,
):
feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer
)
context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder(
block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer
)
corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius)
update_block = kwargs.pop("update_block", None)
if update_block is None:
motion_encoder = MotionEncoder(
in_channels_corr=corr_block.out_channels,
corr_layers=motion_encoder_corr_layers,
flow_layers=motion_encoder_flow_layers,
out_channels=motion_encoder_out_channels,
)
# See comments in forward pass of RAFT class about why we split the output of the context encoder
out_channels_context = context_encoder_layers[-1] - recurrent_block_hidden_state_size
recurrent_block = RecurrentBlock(
input_size=motion_encoder.out_channels + out_channels_context,
hidden_size=recurrent_block_hidden_state_size,
kernel_size=recurrent_block_kernel_size,
padding=recurrent_block_padding,
)
flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size)
update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)
mask_predictor = kwargs.pop("mask_predictor", None)
if mask_predictor is None and use_mask_predictor:
mask_predictor = MaskPredictor(
in_channels=recurrent_block_hidden_state_size,
hidden_size=256,
multiplier=0.25, # See comment in MaskPredictor about this
)
model = RAFT(
feature_encoder=feature_encoder,
context_encoder=context_encoder,
corr_block=corr_block,
update_block=update_block,
mask_predictor=mask_predictor,
**kwargs, # not really needed, all params should be consumed by now
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Large_Weights.C_T_SKHT_V2))
def raft_large(*, weights: Optional[Raft_Large_Weights] = None, progress=True, **kwargs) -> RAFT:
"""RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
Please see the example below for a tutorial on how to use this model.
Args:
weights(:class:`~torchvision.models.optical_flow.Raft_Large_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.optical_flow.Raft_Large_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
for more details about this class.
.. autoclass:: torchvision.models.optical_flow.Raft_Large_Weights
:members:
"""
weights = Raft_Large_Weights.verify(weights)
return _raft(
weights=weights,
progress=progress,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_block=ResidualBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_block=ResidualBlock,
context_encoder_norm_layer=BatchNorm2d,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=4,
# Motion encoder
motion_encoder_corr_layers=(256, 192),
motion_encoder_flow_layers=(128, 64),
motion_encoder_out_channels=128,
# Recurrent block
recurrent_block_hidden_state_size=128,
recurrent_block_kernel_size=((1, 5), (5, 1)),
recurrent_block_padding=((0, 2), (2, 0)),
# Flow head
flow_head_hidden_size=256,
# Mask predictor
use_mask_predictor=True,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Small_Weights.C_T_V2))
def raft_small(*, weights: Optional[Raft_Small_Weights] = None, progress=True, **kwargs) -> RAFT:
"""RAFT "small" model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`__.
Please see the example below for a tutorial on how to use this model.
Args:
weights(:class:`~torchvision.models.optical_flow.Raft_Small_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.optical_flow.Raft_Small_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
for more details about this class.
.. autoclass:: torchvision.models.optical_flow.Raft_Small_Weights
:members:
"""
weights = Raft_Small_Weights.verify(weights)
return _raft(
weights=weights,
progress=progress,
# Feature encoder
feature_encoder_layers=(32, 32, 64, 96, 128),
feature_encoder_block=BottleneckBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(32, 32, 64, 96, 160),
context_encoder_block=BottleneckBlock,
context_encoder_norm_layer=None,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=3,
# Motion encoder
motion_encoder_corr_layers=(96,),
motion_encoder_flow_layers=(64, 32),
motion_encoder_out_channels=82,
# Recurrent block
recurrent_block_hidden_state_size=96,
recurrent_block_kernel_size=(3,),
recurrent_block_padding=(1,),
# Flow head
flow_head_hidden_size=128,
# Mask predictor
use_mask_predictor=False,
**kwargs,
)
```
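
A smoke-test sketch of the `raft_small` builder above with random inputs, just to show the shapes involved; real use would pass `weights=Raft_Small_Weights.DEFAULT` and preprocess a pair of frames with `weights.transforms()`. Input H and W must be divisible by 8 and large enough for the 4-level correlation pyramid.

```py
import torch
from torchvision.models.optical_flow import raft_small

model = raft_small().eval()                  # no weights here; pass Raft_Small_Weights.DEFAULT for pretrained

img1 = torch.rand(1, 3, 128, 160)            # H, W divisible by 8 and >= 8 * 16 = 128 for the default pyramid
img2 = torch.rand(1, 3, 128, 160)

with torch.no_grad():
    flow_predictions = model(img1, img2, num_flow_updates=12)

print(len(flow_predictions))                 # one refined flow estimate per update iteration: 12
print(flow_predictions[-1].shape)            # final estimate, shape (1, 2, 128, 160)
```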
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\__init__.py
ENCODING: utf-8
```py
from .googlenet import *
from .inception import *
from .mobilenet import *
from .resnet import *
from .shufflenetv2 import *
```
|
====================================================================================================================================
SOURCE CODE FILE: googlenet.py
LINES: 1
SIZE: 8.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\googlenet.py
ENCODING: utf-8
```py
import warnings
from functools import partial
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import functional as F
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..googlenet import BasicConv2d, GoogLeNet, GoogLeNet_Weights, GoogLeNetOutputs, Inception, InceptionAux
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableGoogLeNet",
"GoogLeNet_QuantizedWeights",
"googlenet",
]
class QuantizableBasicConv2d(BasicConv2d):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
class QuantizableInception(Inception):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.cat = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.cat.cat(outputs, 1)
class QuantizableInceptionAux(InceptionAux):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = self.relu(self.fc1(x))
# N x 1024
x = self.dropout(x)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class QuantizableGoogLeNet(GoogLeNet):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__( # type: ignore[misc]
*args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
)
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x: Tensor) -> GoogLeNetOutputs:
x = self._transform_input(x)
x = self.quant(x)
x, aux1, aux2 = self._forward(x)
x = self.dequant(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
return GoogLeNetOutputs(x, aux2, aux1)
else:
return self.eager_outputs(x, aux2, aux1)
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
r"""Fuse conv/bn/relu modules in googlenet model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
for m in self.modules():
if type(m) is QuantizableBasicConv2d:
m.fuse_model(is_qat)
class GoogLeNet_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/googlenet_fbgemm-c81f6644.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 6624904,
"min_size": (15, 15),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"unquantized": GoogLeNet_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.826,
"acc@5": 89.404,
}
},
"_ops": 1.498,
"_file_size": 12.618,
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_googlenet")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else GoogLeNet_Weights.IMAGENET1K_V1,
)
)
def googlenet(
*,
weights: Optional[Union[GoogLeNet_QuantizedWeights, GoogLeNet_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableGoogLeNet:
"""GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
:members:
.. autoclass:: torchvision.models.GoogLeNet_Weights
:members:
:noindex:
"""
weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)
original_aux_logits = kwargs.get("aux_logits", False)
if weights is not None:
if "transform_input" not in kwargs:
_ovewrite_named_param(kwargs, "transform_input", True)
_ovewrite_named_param(kwargs, "aux_logits", True)
_ovewrite_named_param(kwargs, "init_weights", False)
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "fbgemm")
model = QuantizableGoogLeNet(**kwargs)
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None # type: ignore[assignment]
model.aux2 = None # type: ignore[assignment]
else:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
)
return model
```
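The block below is a minimal usage sketch, not part of the upstream file: it loads the post-training-quantized GoogLeNet for CPU inference, assumes a PyTorch build where the fbgemm backend is available, and feeds a random tensor as a stand-in for a real image.

```py
import torch
from torchvision.models.quantization import googlenet, GoogLeNet_QuantizedWeights

weights = GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
model = googlenet(weights=weights, quantize=True)  # int8 weights, fbgemm engine
model.eval()

preprocess = weights.transforms()                          # resize/crop/normalize preset
batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)   # random stand-in image
with torch.inference_mode():
    logits = model(batch)                                  # plain Tensor in eval mode
print(logits.shape)  # torch.Size([1, 1000])
```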
|
====================================================================================================================================
SOURCE CODE FILE: inception.py
LINES: 1
SIZE: 10.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\inception.py
ENCODING: utf-8
```py
import warnings
from functools import partial
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableInception3",
"Inception_V3_QuantizedWeights",
"inception_v3",
]
class QuantizableBasicConv2d(inception_module.BasicConv2d):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
class QuantizableInceptionA(inception_module.InceptionA):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.myop.cat(outputs, 1)
class QuantizableInceptionB(inception_module.InceptionB):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.myop.cat(outputs, 1)
class QuantizableInceptionC(inception_module.InceptionC):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.myop.cat(outputs, 1)
class QuantizableInceptionD(inception_module.InceptionD):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.myop.cat(outputs, 1)
class QuantizableInceptionE(inception_module.InceptionE):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
self.myop1 = nn.quantized.FloatFunctional()
self.myop2 = nn.quantized.FloatFunctional()
self.myop3 = nn.quantized.FloatFunctional()
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
branch3x3 = self.myop1.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.myop3.cat(outputs, 1)
class QuantizableInceptionAux(inception_module.InceptionAux):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
class QuantizableInception3(inception_module.Inception3):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__( # type: ignore[misc]
*args,
inception_blocks=[
QuantizableBasicConv2d,
QuantizableInceptionA,
QuantizableInceptionB,
QuantizableInceptionC,
QuantizableInceptionD,
QuantizableInceptionE,
QuantizableInceptionAux,
],
**kwargs,
)
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x: Tensor) -> InceptionOutputs:
x = self._transform_input(x)
x = self.quant(x)
x, aux = self._forward(x)
x = self.dequant(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
return InceptionOutputs(x, aux)
else:
return self.eager_outputs(x, aux)
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
r"""Fuse conv/bn/relu modules in inception model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
for m in self.modules():
if type(m) is QuantizableBasicConv2d:
m.fuse_model(is_qat)
class Inception_V3_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
transforms=partial(ImageClassification, crop_size=299, resize_size=342),
meta={
"num_params": 27161264,
"min_size": (75, 75),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"unquantized": Inception_V3_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.176,
"acc@5": 93.354,
}
},
"_ops": 5.713,
"_file_size": 23.146,
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else Inception_V3_Weights.IMAGENET1K_V1,
)
)
def inception_v3(
*,
weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableInception3:
r"""Inception v3 model architecture from
`Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.
.. note::
        **Important**: In contrast to the other models, ``inception_v3`` expects tensors of size
        N x 3 x 299 x 299, so ensure your images are sized accordingly.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
weights for the model. See
:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr.
Default is True.
quantize (bool, optional): If True, return a quantized version of the model.
Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
:members:
.. autoclass:: torchvision.models.Inception_V3_Weights
:members:
:noindex:
"""
weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)
original_aux_logits = kwargs.get("aux_logits", False)
if weights is not None:
if "transform_input" not in kwargs:
_ovewrite_named_param(kwargs, "transform_input", True)
_ovewrite_named_param(kwargs, "aux_logits", True)
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "fbgemm")
model = QuantizableInception3(**kwargs)
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
if quantize and not original_aux_logits:
model.aux_logits = False
model.AuxLogits = None
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if not quantize and not original_aux_logits:
model.aux_logits = False
model.AuxLogits = None
return model
```
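As a hedged usage sketch (not part of the upstream file), the snippet below loads the quantized Inception v3 and relies on the preset transforms to produce the required 299x299 input; the random tensor stands in for a real image and the fbgemm backend is assumed to be available.

```py
import torch
from torchvision.models.quantization import inception_v3, Inception_V3_QuantizedWeights

weights = Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
model = inception_v3(weights=weights, quantize=True).eval()

# The preset resizes to 342 and center-crops to 299, matching the expected input size.
batch = weights.transforms()(torch.rand(3, 400, 400)).unsqueeze(0)
with torch.inference_mode():
    probs = model(batch).softmax(dim=1)
top5 = probs.topk(5).indices[0].tolist()
print([weights.meta["categories"][i] for i in top5])
```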
|
====================================================================================================================================
SOURCE CODE FILE: mobilenet.py
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\mobilenet.py
ENCODING: utf-8
```py
from .mobilenetv2 import * # noqa: F401, F403
from .mobilenetv3 import * # noqa: F401, F403
from .mobilenetv2 import __all__ as mv2_all
from .mobilenetv3 import __all__ as mv3_all
__all__ = mv2_all + mv3_all
```
|
======================================================================================================================================
SOURCE CODE FILE: mobilenetv2.py
LINES: 1
SIZE: 5.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\mobilenetv2.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Optional, Union
from torch import nn, Tensor
from torch.ao.quantization import DeQuantStub, QuantStub
from torchvision.models.mobilenetv2 import InvertedResidual, MobileNet_V2_Weights, MobileNetV2
from ...ops.misc import Conv2dNormActivation
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableMobileNetV2",
"MobileNet_V2_QuantizedWeights",
"mobilenet_v2",
]
class QuantizableInvertedResidual(InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return self.skip_add.add(x, self.conv(x))
else:
return self.conv(x)
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
for idx in range(len(self.conv)):
if type(self.conv[idx]) is nn.Conv2d:
_fuse_modules(self.conv, [str(idx), str(idx + 1)], is_qat, inplace=True)
class QuantizableMobileNetV2(MobileNetV2):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
MobileNet V2 main class
Args:
Inherits args from floating point MobileNetV2
"""
super().__init__(*args, **kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
for m in self.modules():
if type(m) is Conv2dNormActivation:
_fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
if type(m) is QuantizableInvertedResidual:
m.fuse_model(is_qat)
class MobileNet_V2_QuantizedWeights(WeightsEnum):
IMAGENET1K_QNNPACK_V1 = Weights(
url="https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 3504872,
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "qnnpack",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
"unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.658,
"acc@5": 90.150,
}
},
"_ops": 0.301,
"_file_size": 3.423,
"_docs": """
These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_QNNPACK_V1
@register_model(name="quantized_mobilenet_v2")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
if kwargs.get("quantize", False)
else MobileNet_V2_Weights.IMAGENET1K_V1,
)
)
def mobilenet_v2(
*,
weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableMobileNetV2:
"""
Constructs a MobileNetV2 architecture from
`MobileNetV2: Inverted Residuals and Linear Bottlenecks
<https://arxiv.org/abs/1801.04381>`_.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
:members:
.. autoclass:: torchvision.models.MobileNet_V2_Weights
:members:
:noindex:
"""
weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "qnnpack")
model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
```
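A small sketch, not from the upstream file, of how the QAT-trained quantized MobileNetV2 weights might be exercised; it assumes the qnnpack engine appears in ``torch.backends.quantized.supported_engines`` (otherwise construction raises), and the unnormalized random input is only a shape check.

```py
import torch
from torchvision.models.quantization import mobilenet_v2, MobileNet_V2_QuantizedWeights

weights = MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
model = mobilenet_v2(weights=weights, quantize=True).eval()
print(torch.backends.quantized.engine)  # "qnnpack", taken from weights.meta["backend"]

with torch.inference_mode():
    out = model(torch.rand(1, 3, 224, 224))  # random stand-in input
print(out.shape)  # torch.Size([1, 1000])
```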
|
======================================================================================================================================
SOURCE CODE FILE: mobilenetv3.py
LINES: 1
SIZE: 9.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\mobilenetv3.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, List, Optional, Union
import torch
from torch import nn, Tensor
from torch.ao.quantization import DeQuantStub, QuantStub
from ...ops.misc import Conv2dNormActivation, SqueezeExcitation
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..mobilenetv3 import (
_mobilenet_v3_conf,
InvertedResidual,
InvertedResidualConfig,
MobileNet_V3_Large_Weights,
MobileNetV3,
)
from .utils import _fuse_modules, _replace_relu
__all__ = [
"QuantizableMobileNetV3",
"MobileNet_V3_Large_QuantizedWeights",
"mobilenet_v3_large",
]
class QuantizableSqueezeExcitation(SqueezeExcitation):
_version = 2
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs["scale_activation"] = nn.Hardsigmoid
super().__init__(*args, **kwargs)
self.skip_mul = nn.quantized.FloatFunctional()
def forward(self, input: Tensor) -> Tensor:
return self.skip_mul.mul(self._scale(input), input)
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(self, ["fc1", "activation"], is_qat, inplace=True)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if hasattr(self, "qconfig") and (version is None or version < 2):
default_state_dict = {
"scale_activation.activation_post_process.scale": torch.tensor([1.0]),
"scale_activation.activation_post_process.activation_post_process.scale": torch.tensor([1.0]),
"scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
"scale_activation.activation_post_process.activation_post_process.zero_point": torch.tensor(
[0], dtype=torch.int32
),
"scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
"scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
}
for k, v in default_state_dict.items():
full_key = prefix + k
if full_key not in state_dict:
state_dict[full_key] = v
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
class QuantizableInvertedResidual(InvertedResidual):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs) # type: ignore[misc]
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return self.skip_add.add(x, self.block(x))
else:
return self.block(x)
class QuantizableMobileNetV3(MobileNetV3):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
MobileNet V3 main class
Args:
Inherits args from floating point MobileNetV3
"""
super().__init__(*args, **kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
for m in self.modules():
if type(m) is Conv2dNormActivation:
modules_to_fuse = ["0", "1"]
if len(m) == 3 and type(m[2]) is nn.ReLU:
modules_to_fuse.append("2")
_fuse_modules(m, modules_to_fuse, is_qat, inplace=True)
elif type(m) is QuantizableSqueezeExcitation:
m.fuse_model(is_qat)
def _mobilenet_v3_model(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
quantize: bool,
**kwargs: Any,
) -> QuantizableMobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "qnnpack")
model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
_replace_relu(model)
if quantize:
# Instead of quantizing the model and then loading the quantized weights we take a different approach.
# We prepare the QAT model, load the QAT weights from training and then convert it.
# This is done to avoid extremely low accuracies observed on the specific model. This is rather a workaround
# for an unresolved bug on the eager quantization API detailed at: https://github.com/pytorch/vision/issues/5890
model.fuse_model(is_qat=True)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
torch.ao.quantization.prepare_qat(model, inplace=True)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
if quantize:
torch.ao.quantization.convert(model, inplace=True)
model.eval()
return model
class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
IMAGENET1K_QNNPACK_V1 = Weights(
url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 5483032,
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "qnnpack",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
"unquantized": MobileNet_V3_Large_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 73.004,
"acc@5": 90.858,
}
},
"_ops": 0.217,
"_file_size": 21.554,
"_docs": """
These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_QNNPACK_V1
@register_model(name="quantized_mobilenet_v3_large")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: MobileNet_V3_Large_QuantizedWeights.IMAGENET1K_QNNPACK_V1
if kwargs.get("quantize", False)
else MobileNet_V3_Large_Weights.IMAGENET1K_V1,
)
)
def mobilenet_v3_large(
*,
weights: Optional[Union[MobileNet_V3_Large_QuantizedWeights, MobileNet_V3_Large_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableMobileNetV3:
"""
MobileNetV3 (Large) model from
`Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV3``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv3.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights
:members:
.. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
:members:
:noindex:
"""
weights = (MobileNet_V3_Large_QuantizedWeights if quantize else MobileNet_V3_Large_Weights).verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3_model(inverted_residual_setting, last_channel, weights, progress, quantize, **kwargs)
```
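The sketch below, which is not part of the upstream file, calls the builder with ``quantize=True`` so that ``_mobilenet_v3_model`` runs the fuse, prepare_qat, load, convert sequence described in its comments; availability of the qnnpack backend is assumed.

```py
import torch
from torchvision.models.quantization import mobilenet_v3_large, MobileNet_V3_Large_QuantizedWeights

weights = MobileNet_V3_Large_QuantizedWeights.DEFAULT
# Internally: fuse_model(is_qat=True) -> prepare_qat -> load QAT weights -> convert -> eval().
model = mobilenet_v3_large(weights=weights, quantize=True)
with torch.inference_mode():
    out = model(torch.rand(1, 3, 224, 224))  # random stand-in input
print(weights.meta["categories"][out.argmax().item()])
```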
|
=================================================================================================================================
SOURCE CODE FILE: resnet.py
LINES: 1
SIZE: 17.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\resnet.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import (
BasicBlock,
Bottleneck,
ResNet,
ResNet18_Weights,
ResNet50_Weights,
ResNeXt101_32X8D_Weights,
ResNeXt101_64X4D_Weights,
)
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableResNet",
"ResNet18_QuantizedWeights",
"ResNet50_QuantizedWeights",
"ResNeXt101_32X8D_QuantizedWeights",
"ResNeXt101_64X4D_QuantizedWeights",
"resnet18",
"resnet50",
"resnext101_32x8d",
"resnext101_64x4d",
]
class QuantizableBasicBlock(BasicBlock):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.add_relu = torch.nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.add_relu.add_relu(out, identity)
return out
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
if self.downsample:
_fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableBottleneck(Bottleneck):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.skip_add_relu = nn.quantized.FloatFunctional()
self.relu1 = nn.ReLU(inplace=False)
self.relu2 = nn.ReLU(inplace=False)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip_add_relu.add_relu(out, identity)
return out
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(
self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
)
if self.downsample:
_fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableResNet(ResNet):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
# Ensure scriptability
# super(QuantizableResNet,self).forward(x)
# is not scriptable
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
r"""Fuse conv/bn/relu modules in resnet models
Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
_fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
for m in self.modules():
if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
m.fuse_model(is_qat)
def _resnet(
block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
layers: List[int],
weights: Optional[WeightsEnum],
progress: bool,
quantize: bool,
**kwargs: Any,
) -> QuantizableResNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "fbgemm")
model = QuantizableResNet(block, layers, **kwargs)
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
}
class ResNet18_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 11689512,
"unquantized": ResNet18_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.494,
"acc@5": 88.882,
}
},
"_ops": 1.814,
"_file_size": 11.238,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
class ResNet50_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 25557032,
"unquantized": ResNet50_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.920,
"acc@5": 92.814,
}
},
"_ops": 4.089,
"_file_size": 24.759,
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 25557032,
"unquantized": ResNet50_Weights.IMAGENET1K_V2,
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.282,
"acc@5": 94.976,
}
},
"_ops": 4.089,
"_file_size": 24.953,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 88791336,
"unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.986,
"acc@5": 94.480,
}
},
"_ops": 16.414,
"_file_size": 86.034,
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 88791336,
"unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.574,
"acc@5": 96.132,
}
},
"_ops": 16.414,
"_file_size": 86.645,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 83455272,
"recipe": "https://github.com/pytorch/vision/pull/5935",
"unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.898,
"acc@5": 96.326,
}
},
"_ops": 15.46,
"_file_size": 81.556,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_resnet18")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNet18_Weights.IMAGENET1K_V1,
)
)
def resnet18(
*,
weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNet-18 model from
`Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNet18_Weights
:members:
:noindex:
"""
weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)
return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnet50")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNet50_Weights.IMAGENET1K_V1,
)
)
def resnet50(
*,
weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNet-50 model from
`Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNet50_Weights
:members:
:noindex:
"""
weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)
return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_32x8d")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
)
)
def resnext101_32x8d(
*,
weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNeXt-101 32x8d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
:members:
:noindex:
"""
weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)
_ovewrite_named_param(kwargs, "groups", 32)
_ovewrite_named_param(kwargs, "width_per_group", 8)
return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
)
)
def resnext101_64x4d(
*,
weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNeXt-101 64x4d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
:members:
:noindex:
"""
weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
_ovewrite_named_param(kwargs, "groups", 64)
_ovewrite_named_param(kwargs, "width_per_group", 4)
return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
```
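Below is a hedged usage sketch (not part of the upstream file) for the quantized ResNet-50 weights, which also shows the accuracy metadata carried by the weights enum. A random tensor stands in for a real image and the fbgemm backend is assumed to be available.

```py
import torch
from torchvision.models.quantization import resnet50, ResNet50_QuantizedWeights

weights = ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V2
model = resnet50(weights=weights, quantize=True).eval()
print(weights.meta["_metrics"]["ImageNet-1K"])  # {'acc@1': 80.282, 'acc@5': 94.976}

batch = weights.transforms()(torch.rand(3, 256, 256)).unsqueeze(0)  # stand-in image
with torch.inference_mode():
    scores = model(batch)
print(weights.meta["categories"][scores.argmax().item()])
```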
|
=======================================================================================================================================
SOURCE CODE FILE: shufflenetv2.py
LINES: 1
SIZE: 16.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\shufflenetv2.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models import shufflenetv2
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..shufflenetv2 import (
ShuffleNet_V2_X0_5_Weights,
ShuffleNet_V2_X1_0_Weights,
ShuffleNet_V2_X1_5_Weights,
ShuffleNet_V2_X2_0_Weights,
)
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableShuffleNetV2",
"ShuffleNet_V2_X0_5_QuantizedWeights",
"ShuffleNet_V2_X1_0_QuantizedWeights",
"ShuffleNet_V2_X1_5_QuantizedWeights",
"ShuffleNet_V2_X2_0_QuantizedWeights",
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
]
class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.cat = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = self.cat.cat([x1, self.branch2(x2)], dim=1)
else:
out = self.cat.cat([self.branch1(x), self.branch2(x)], dim=1)
out = shufflenetv2.channel_shuffle(out, 2)
return out
class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs) # type: ignore[misc]
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
r"""Fuse conv/bn/relu modules in shufflenetv2 model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place.
.. note::
Note that this operation does not change numerics
and the model after modification is in floating point
"""
for name, m in self._modules.items():
if name in ["conv1", "conv5"] and m is not None:
_fuse_modules(m, [["0", "1", "2"]], is_qat, inplace=True)
for m in self.modules():
if type(m) is QuantizableInvertedResidual:
if len(m.branch1._modules.items()) > 0:
_fuse_modules(m.branch1, [["0", "1"], ["2", "3", "4"]], is_qat, inplace=True)
_fuse_modules(
m.branch2,
[["0", "1", "2"], ["3", "4"], ["5", "6", "7"]],
is_qat,
inplace=True,
)
def _shufflenetv2(
stages_repeats: List[int],
stages_out_channels: List[int],
*,
weights: Optional[WeightsEnum],
progress: bool,
quantize: bool,
**kwargs: Any,
) -> QuantizableShuffleNetV2:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "fbgemm")
model = QuantizableShuffleNetV2(stages_repeats, stages_out_channels, **kwargs)
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
}
class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 1366792,
"unquantized": ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 57.972,
"acc@5": 79.780,
}
},
"_ops": 0.04,
"_file_size": 1.501,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-1e62bb32.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2278604,
"unquantized": ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 68.360,
"acc@5": 87.582,
}
},
"_ops": 0.145,
"_file_size": 2.334,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_5_fbgemm-d7401f05.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 3503624,
"unquantized": ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.052,
"acc@5": 90.700,
}
},
"_ops": 0.296,
"_file_size": 3.672,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/shufflenetv2_x2_0_fbgemm-5cac526c.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 7393996,
"unquantized": ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.354,
"acc@5": 92.488,
}
},
"_ops": 0.583,
"_file_size": 7.467,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_shufflenet_v2_x0_5")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X0_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x0_5(
*,
weights: Optional[Union[ShuffleNet_V2_X0_5_QuantizedWeights, ShuffleNet_V2_X0_5_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 0.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr.
Default is True.
quantize (bool, optional): If True, return a quantized version of the model.
Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
:members:
:noindex:
"""
weights = (ShuffleNet_V2_X0_5_QuantizedWeights if quantize else ShuffleNet_V2_X0_5_Weights).verify(weights)
return _shufflenetv2(
[4, 8, 4], [24, 48, 96, 192, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
)
@register_model(name="quantized_shufflenet_v2_x1_0")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X1_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x1_0(
*,
weights: Optional[Union[ShuffleNet_V2_X1_0_QuantizedWeights, ShuffleNet_V2_X1_0_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 1.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr.
Default is True.
quantize (bool, optional): If True, return a quantized version of the model.
Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
:members:
:noindex:
"""
weights = (ShuffleNet_V2_X1_0_QuantizedWeights if quantize else ShuffleNet_V2_X1_0_Weights).verify(weights)
return _shufflenetv2(
[4, 8, 4], [24, 116, 232, 464, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
)
@register_model(name="quantized_shufflenet_v2_x1_5")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X1_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x1_5(
*,
weights: Optional[Union[ShuffleNet_V2_X1_5_QuantizedWeights, ShuffleNet_V2_X1_5_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 1.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr.
Default is True.
quantize (bool, optional): If True, return a quantized version of the model.
Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
:members:
:noindex:
"""
weights = (ShuffleNet_V2_X1_5_QuantizedWeights if quantize else ShuffleNet_V2_X1_5_Weights).verify(weights)
return _shufflenetv2(
[4, 8, 4], [24, 176, 352, 704, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
)
@register_model(name="quantized_shufflenet_v2_x2_0")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X2_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x2_0(
*,
weights: Optional[Union[ShuffleNet_V2_X2_0_QuantizedWeights, ShuffleNet_V2_X2_0_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 2.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr.
Default is True.
quantize (bool, optional): If True, return a quantized version of the model.
Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
:members:
:noindex:
"""
weights = (ShuffleNet_V2_X2_0_QuantizedWeights if quantize else ShuffleNet_V2_X2_0_Weights).verify(weights)
return _shufflenetv2(
[4, 8, 4], [24, 244, 488, 976, 2048], weights=weights, progress=progress, quantize=quantize, **kwargs
)
```
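A brief sketch (not part of the upstream file) exercising one of the quantized ShuffleNetV2 variants; as above, the input is a random stand-in and the fbgemm backend is assumed to be available.

```py
import torch
from torchvision.models.quantization import shufflenet_v2_x1_0, ShuffleNet_V2_X1_0_QuantizedWeights

weights = ShuffleNet_V2_X1_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
model = shufflenet_v2_x1_0(weights=weights, quantize=True).eval()
with torch.inference_mode():
    out = model(torch.rand(1, 3, 224, 224))  # random stand-in input
assert out.shape == (1, 1000)
```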
|
================================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 1
SIZE: 2.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\quantization\utils.py
ENCODING: utf-8
```py
from typing import Any, List, Optional, Union
import torch
from torch import nn
def _replace_relu(module: nn.Module) -> None:
reassign = {}
for name, mod in module.named_children():
_replace_relu(mod)
        # Check the explicit type rather than using isinstance, since we only
        # want to replace modules of the exact type, not subclasses.
if type(mod) is nn.ReLU or type(mod) is nn.ReLU6:
reassign[name] = nn.ReLU(inplace=False)
for key, value in reassign.items():
module._modules[key] = value
def quantize_model(model: nn.Module, backend: str) -> None:
_dummy_input_data = torch.rand(1, 3, 299, 299)
if backend not in torch.backends.quantized.supported_engines:
raise RuntimeError("Quantized backend not supported ")
torch.backends.quantized.engine = backend
model.eval()
# Make sure that weight qconfig matches that of the serialized models
if backend == "fbgemm":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
elif backend == "qnnpack":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer, weight=torch.ao.quantization.default_weight_observer
)
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
model.fuse_model() # type: ignore[operator]
torch.ao.quantization.prepare(model, inplace=True)
model(_dummy_input_data)
torch.ao.quantization.convert(model, inplace=True)
def _fuse_modules(
model: nn.Module, modules_to_fuse: Union[List[str], List[List[str]]], is_qat: Optional[bool], **kwargs: Any
):
if is_qat is None:
is_qat = model.training
method = torch.ao.quantization.fuse_modules_qat if is_qat else torch.ao.quantization.fuse_modules
return method(model, modules_to_fuse, **kwargs)
```
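The sketch below (not part of the upstream file) spells out the eager-mode post-training-quantization flow that ``quantize_model`` wraps, applied to a float QuantizableResNet-18 from this package. The single random batch is only a stand-in for a real calibration set, and ``get_default_qconfig`` is used instead of the explicit ``QConfig`` above, so the result will not match the serialized quantized weights.

```py
import torch
from torchvision.models.quantization import resnet18

model = resnet18(weights=None, quantize=False).eval()   # float QuantizableResNet
torch.backends.quantized.engine = "fbgemm"              # assumes fbgemm is supported
model.fuse_model(is_qat=False)                          # fold conv+bn(+relu) patterns
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)      # insert observers
model(torch.rand(8, 3, 224, 224))                       # "calibration" on a random batch
torch.ao.quantization.convert(model, inplace=True)      # swap in quantized modules

with torch.inference_mode():
    out = model(torch.rand(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])
```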
|
====================================================================================================================
SOURCE CODE FILE: regnet.py
LINES: 1
SIZE: 63.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\regnet.py
ENCODING: utf-8
```py
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = [
"RegNet",
"RegNet_Y_400MF_Weights",
"RegNet_Y_800MF_Weights",
"RegNet_Y_1_6GF_Weights",
"RegNet_Y_3_2GF_Weights",
"RegNet_Y_8GF_Weights",
"RegNet_Y_16GF_Weights",
"RegNet_Y_32GF_Weights",
"RegNet_Y_128GF_Weights",
"RegNet_X_400MF_Weights",
"RegNet_X_800MF_Weights",
"RegNet_X_1_6GF_Weights",
"RegNet_X_3_2GF_Weights",
"RegNet_X_8GF_Weights",
"RegNet_X_16GF_Weights",
"RegNet_X_32GF_Weights",
"regnet_y_400mf",
"regnet_y_800mf",
"regnet_y_1_6gf",
"regnet_y_3_2gf",
"regnet_y_8gf",
"regnet_y_16gf",
"regnet_y_32gf",
"regnet_y_128gf",
"regnet_x_400mf",
"regnet_x_800mf",
"regnet_x_1_6gf",
"regnet_x_3_2gf",
"regnet_x_8gf",
"regnet_x_16gf",
"regnet_x_32gf",
]
class SimpleStemIN(Conv2dNormActivation):
"""Simple stem for ImageNet: 3x3, BN, ReLU."""
def __init__(
self,
width_in: int,
width_out: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
) -> None:
super().__init__(
width_in, width_out, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=activation_layer
)
class BottleneckTransform(nn.Sequential):
"""Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int,
bottleneck_multiplier: float,
se_ratio: Optional[float],
) -> None:
layers: OrderedDict[str, nn.Module] = OrderedDict()
w_b = int(round(width_out * bottleneck_multiplier))
g = w_b // group_width
layers["a"] = Conv2dNormActivation(
width_in, w_b, kernel_size=1, stride=1, norm_layer=norm_layer, activation_layer=activation_layer
)
layers["b"] = Conv2dNormActivation(
w_b, w_b, kernel_size=3, stride=stride, groups=g, norm_layer=norm_layer, activation_layer=activation_layer
)
if se_ratio:
# The SE reduction ratio is defined with respect to the
# beginning of the block
width_se_out = int(round(se_ratio * width_in))
layers["se"] = SqueezeExcitation(
input_channels=w_b,
squeeze_channels=width_se_out,
activation=activation_layer,
)
layers["c"] = Conv2dNormActivation(
w_b, width_out, kernel_size=1, stride=1, norm_layer=norm_layer, activation_layer=None
)
super().__init__(layers)
class ResBottleneckBlock(nn.Module):
"""Residual bottleneck block: x + F(x), F = bottleneck transform."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int = 1,
bottleneck_multiplier: float = 1.0,
se_ratio: Optional[float] = None,
) -> None:
super().__init__()
# Use skip connection with projection if shape changes
self.proj = None
should_proj = (width_in != width_out) or (stride != 1)
if should_proj:
self.proj = Conv2dNormActivation(
width_in, width_out, kernel_size=1, stride=stride, norm_layer=norm_layer, activation_layer=None
)
self.f = BottleneckTransform(
width_in,
width_out,
stride,
norm_layer,
activation_layer,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.activation = activation_layer(inplace=True)
def forward(self, x: Tensor) -> Tensor:
if self.proj is not None:
x = self.proj(x) + self.f(x)
else:
x = x + self.f(x)
return self.activation(x)
class AnyStage(nn.Sequential):
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
depth: int,
block_constructor: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int,
bottleneck_multiplier: float,
se_ratio: Optional[float] = None,
stage_index: int = 0,
) -> None:
super().__init__()
for i in range(depth):
block = block_constructor(
width_in if i == 0 else width_out,
width_out,
stride if i == 0 else 1,
norm_layer,
activation_layer,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.add_module(f"block{stage_index}-{i}", block)
class BlockParams:
def __init__(
self,
depths: List[int],
widths: List[int],
group_widths: List[int],
bottleneck_multipliers: List[float],
strides: List[int],
se_ratio: Optional[float] = None,
) -> None:
self.depths = depths
self.widths = widths
self.group_widths = group_widths
self.bottleneck_multipliers = bottleneck_multipliers
self.strides = strides
self.se_ratio = se_ratio
@classmethod
def from_init_params(
cls,
depth: int,
w_0: int,
w_a: float,
w_m: float,
group_width: int,
bottleneck_multiplier: float = 1.0,
se_ratio: Optional[float] = None,
**kwargs: Any,
) -> "BlockParams":
"""
Programmatically compute all the per-block settings,
given the RegNet parameters.
The first step is to compute the quantized linear block parameters,
in log space. Key parameters are:
- `w_a` is the width progression slope
- `w_0` is the initial width
- `w_m` is the width stepping in the log space
In other terms
        `log(block_width) = log(w_0) + block_capacity * log(w_m)`,
        with `block_capacity` ramping up following the w_0 and w_a params.
This block width is finally quantized to multiples of 8.
The second step is to compute the parameters per stage,
taking into account the skip connection and the final 1x1 convolutions.
We use the fact that the output width is constant within a stage.
"""
QUANT = 8
STRIDE = 2
if w_a < 0 or w_0 <= 0 or w_m <= 1 or w_0 % 8 != 0:
raise ValueError("Invalid RegNet settings")
# Compute the block widths. Each stage has one unique block width
widths_cont = torch.arange(depth) * w_a + w_0
block_capacity = torch.round(torch.log(widths_cont / w_0) / math.log(w_m))
block_widths = (torch.round(torch.divide(w_0 * torch.pow(w_m, block_capacity), QUANT)) * QUANT).int().tolist()
num_stages = len(set(block_widths))
# Convert to per stage parameters
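        # A new stage starts wherever the block width changes; the 0 sentinels added on
        # either side mark the boundaries before the first block and after the last one.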
split_helper = zip(
block_widths + [0],
[0] + block_widths,
block_widths + [0],
[0] + block_widths,
)
splits = [w != wp or r != rp for w, wp, r, rp in split_helper]
stage_widths = [w for w, t in zip(block_widths, splits[:-1]) if t]
stage_depths = torch.diff(torch.tensor([d for d, t in enumerate(splits) if t])).int().tolist()
strides = [STRIDE] * num_stages
bottleneck_multipliers = [bottleneck_multiplier] * num_stages
group_widths = [group_width] * num_stages
# Adjust the compatibility of stage widths and group widths
stage_widths, group_widths = cls._adjust_widths_groups_compatibilty(
stage_widths, bottleneck_multipliers, group_widths
)
return cls(
depths=stage_depths,
widths=stage_widths,
group_widths=group_widths,
bottleneck_multipliers=bottleneck_multipliers,
strides=strides,
se_ratio=se_ratio,
)
def _get_expanded_params(self):
return zip(self.widths, self.strides, self.depths, self.group_widths, self.bottleneck_multipliers)
@staticmethod
def _adjust_widths_groups_compatibilty(
stage_widths: List[int], bottleneck_ratios: List[float], group_widths: List[int]
) -> Tuple[List[int], List[int]]:
"""
Adjusts the compatibility of widths and groups,
depending on the bottleneck ratio.
"""
# Compute all widths for the current settings
widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
group_widths_min = [min(g, w_bot) for g, w_bot in zip(group_widths, widths)]
# Compute the adjusted widths so that stage and group widths fit
ws_bot = [_make_divisible(w_bot, g) for w_bot, g in zip(widths, group_widths_min)]
stage_widths = [int(w_bot / b) for w_bot, b in zip(ws_bot, bottleneck_ratios)]
return stage_widths, group_widths_min
class RegNet(nn.Module):
def __init__(
self,
block_params: BlockParams,
num_classes: int = 1000,
stem_width: int = 32,
stem_type: Optional[Callable[..., nn.Module]] = None,
block_type: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
if stem_type is None:
stem_type = SimpleStemIN
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if block_type is None:
block_type = ResBottleneckBlock
if activation is None:
activation = nn.ReLU
# Ad hoc stem
self.stem = stem_type(
3, # width_in
stem_width,
norm_layer,
activation,
)
current_width = stem_width
blocks = []
for i, (
width_out,
stride,
depth,
group_width,
bottleneck_multiplier,
) in enumerate(block_params._get_expanded_params()):
blocks.append(
(
f"block{i+1}",
AnyStage(
current_width,
width_out,
stride,
depth,
block_type,
norm_layer,
activation,
group_width,
bottleneck_multiplier,
block_params.se_ratio,
stage_index=i + 1,
),
)
)
current_width = width_out
self.trunk_output = nn.Sequential(OrderedDict(blocks))
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(in_features=current_width, out_features=num_classes)
# Performs ResNet-style weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
nn.init.normal_(m.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.zeros_(m.bias)
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
x = self.trunk_output(x)
x = self.avgpool(x)
x = x.flatten(start_dim=1)
x = self.fc(x)
return x
def _regnet(
block_params: BlockParams,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> RegNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
norm_layer = kwargs.pop("norm_layer", partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1))
model = RegNet(block_params, norm_layer=norm_layer, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META: Dict[str, Any] = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
}
_COMMON_SWAG_META = {
**_COMMON_META,
"recipe": "https://github.com/facebookresearch/SWAG",
"license": "https://github.com/facebookresearch/SWAG/blob/main/LICENSE",
}
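# Each Weights entry below pairs a checkpoint URL with the preprocessing transforms it was
# evaluated with, plus metadata (parameter count, ImageNet-1K accuracy, GFLOPs and checkpoint
# file size) that is surfaced in the documentation tables.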
class RegNet_Y_400MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_400mf-c65dace8.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.046,
"acc@5": 91.716,
}
},
"_ops": 0.402,
"_file_size": 16.806,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.804,
"acc@5": 92.742,
}
},
"_ops": 0.402,
"_file_size": 16.806,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_800MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_800mf-1b27b58c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.420,
"acc@5": 93.136,
}
},
"_ops": 0.834,
"_file_size": 24.774,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.828,
"acc@5": 94.502,
}
},
"_ops": 0.834,
"_file_size": 24.774,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_1_6GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_1_6gf-b11a554e.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.950,
"acc@5": 93.966,
}
},
"_ops": 1.612,
"_file_size": 43.152,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.876,
"acc@5": 95.444,
}
},
"_ops": 1.612,
"_file_size": 43.152,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_3_2GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_3_2gf-b5a9779c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.948,
"acc@5": 94.576,
}
},
"_ops": 3.176,
"_file_size": 74.567,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.982,
"acc@5": 95.972,
}
},
"_ops": 3.176,
"_file_size": 74.567,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_8GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_8gf-d0d0e4a8.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.032,
"acc@5": 95.048,
}
},
"_ops": 8.473,
"_file_size": 150.701,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.828,
"acc@5": 96.330,
}
},
"_ops": 8.473,
"_file_size": 150.701,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_16GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf-9e6ed7dd.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.424,
"acc@5": 95.240,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.886,
"acc@5": 96.328,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 83590140,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.012,
"acc@5": 98.054,
}
},
"_ops": 46.735,
"_file_size": 319.49,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 83590140,
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.976,
"acc@5": 97.244,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_32GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf-4dee3f7a.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.878,
"acc@5": 95.340,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.368,
"acc@5": 96.498,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 145046770,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.838,
"acc@5": 98.362,
}
},
"_ops": 94.826,
"_file_size": 554.076,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 145046770,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.622,
"acc@5": 97.480,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_128GF_Weights(WeightsEnum):
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 644812894,
"_metrics": {
"ImageNet-1K": {
"acc@1": 88.228,
"acc@5": 98.682,
}
},
"_ops": 374.57,
"_file_size": 2461.564,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 644812894,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.068,
"acc@5": 97.844,
}
},
"_ops": 127.518,
"_file_size": 2461.564,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_SWAG_E2E_V1
class RegNet_X_400MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_400mf-adf1edd5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.834,
"acc@5": 90.950,
}
},
"_ops": 0.414,
"_file_size": 21.258,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.864,
"acc@5": 92.322,
}
},
"_ops": 0.414,
"_file_size": 21.257,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_800MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_800mf-ad17e45c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.212,
"acc@5": 92.348,
}
},
"_ops": 0.8,
"_file_size": 27.945,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.522,
"acc@5": 93.826,
}
},
"_ops": 0.8,
"_file_size": 27.945,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_1_6GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_1_6gf-e3633e7f.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.040,
"acc@5": 93.440,
}
},
"_ops": 1.603,
"_file_size": 35.339,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.668,
"acc@5": 94.922,
}
},
"_ops": 1.603,
"_file_size": 35.339,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_3_2GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_3_2gf-f342aeae.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.364,
"acc@5": 93.992,
}
},
"_ops": 3.177,
"_file_size": 58.756,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.196,
"acc@5": 95.430,
}
},
"_ops": 3.177,
"_file_size": 58.756,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_8GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_8gf-03ceed89.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.344,
"acc@5": 94.686,
}
},
"_ops": 7.995,
"_file_size": 151.456,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.682,
"acc@5": 95.678,
}
},
"_ops": 7.995,
"_file_size": 151.456,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_16GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_16gf-2007eb11.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.058,
"acc@5": 94.944,
}
},
"_ops": 15.941,
"_file_size": 207.627,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.716,
"acc@5": 96.196,
}
},
"_ops": 15.941,
"_file_size": 207.627,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_32GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_32gf-9d47f8d0.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.622,
"acc@5": 95.248,
}
},
"_ops": 31.736,
"_file_size": 412.039,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.014,
"acc@5": 96.288,
}
},
"_ops": 31.736,
"_file_size": 412.039,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
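# The builders below share a common pattern: @register_model() exposes each constructor through
# torchvision.models.get_model / list_models, and @handle_legacy_interface maps the deprecated
# pretrained=True argument onto the weights named in the decorator.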
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_400MF_Weights.IMAGENET1K_V1))
def regnet_y_400mf(*, weights: Optional[RegNet_Y_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_400MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_400MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_400MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_400MF_Weights
:members:
"""
weights = RegNet_Y_400MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_800MF_Weights.IMAGENET1K_V1))
def regnet_y_800mf(*, weights: Optional[RegNet_Y_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_800MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_800MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_800MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_800MF_Weights
:members:
"""
weights = RegNet_Y_800MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=14, w_0=56, w_a=38.84, w_m=2.4, group_width=16, se_ratio=0.25, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_1_6GF_Weights.IMAGENET1K_V1))
def regnet_y_1_6gf(*, weights: Optional[RegNet_Y_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_1.6GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_1_6GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_1_6GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_1_6GF_Weights
:members:
"""
weights = RegNet_Y_1_6GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=27, w_0=48, w_a=20.71, w_m=2.65, group_width=24, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_3_2GF_Weights.IMAGENET1K_V1))
def regnet_y_3_2gf(*, weights: Optional[RegNet_Y_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_3.2GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_3_2GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_3_2GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_3_2GF_Weights
:members:
"""
weights = RegNet_Y_3_2GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=21, w_0=80, w_a=42.63, w_m=2.66, group_width=24, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_8GF_Weights.IMAGENET1K_V1))
def regnet_y_8gf(*, weights: Optional[RegNet_Y_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_8GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_8GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_8GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_8GF_Weights
:members:
"""
weights = RegNet_Y_8GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=17, w_0=192, w_a=76.82, w_m=2.19, group_width=56, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_16GF_Weights.IMAGENET1K_V1))
def regnet_y_16gf(*, weights: Optional[RegNet_Y_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_16GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_16GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_16GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_16GF_Weights
:members:
"""
weights = RegNet_Y_16GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=18, w_0=200, w_a=106.23, w_m=2.48, group_width=112, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_32GF_Weights.IMAGENET1K_V1))
def regnet_y_32gf(*, weights: Optional[RegNet_Y_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_32GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_32GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_32GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_32GF_Weights
:members:
"""
weights = RegNet_Y_32GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=20, w_0=232, w_a=115.89, w_m=2.53, group_width=232, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def regnet_y_128gf(*, weights: Optional[RegNet_Y_128GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_128GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_128GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_128GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_128GF_Weights
:members:
"""
weights = RegNet_Y_128GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_400MF_Weights.IMAGENET1K_V1))
def regnet_x_400mf(*, weights: Optional[RegNet_X_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_400MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_400MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_400MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_400MF_Weights
:members:
"""
weights = RegNet_X_400MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=22, w_0=24, w_a=24.48, w_m=2.54, group_width=16, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_800MF_Weights.IMAGENET1K_V1))
def regnet_x_800mf(*, weights: Optional[RegNet_X_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_800MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_800MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_800MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_800MF_Weights
:members:
"""
weights = RegNet_X_800MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=16, w_0=56, w_a=35.73, w_m=2.28, group_width=16, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_1_6GF_Weights.IMAGENET1K_V1))
def regnet_x_1_6gf(*, weights: Optional[RegNet_X_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_1.6GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_1_6GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_1_6GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_1_6GF_Weights
:members:
"""
weights = RegNet_X_1_6GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=18, w_0=80, w_a=34.01, w_m=2.25, group_width=24, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_3_2GF_Weights.IMAGENET1K_V1))
def regnet_x_3_2gf(*, weights: Optional[RegNet_X_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_3.2GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_3_2GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_3_2GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_3_2GF_Weights
:members:
"""
weights = RegNet_X_3_2GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=25, w_0=88, w_a=26.31, w_m=2.25, group_width=48, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_8GF_Weights.IMAGENET1K_V1))
def regnet_x_8gf(*, weights: Optional[RegNet_X_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_8GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_8GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_8GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_8GF_Weights
:members:
"""
weights = RegNet_X_8GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=23, w_0=80, w_a=49.56, w_m=2.88, group_width=120, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_16GF_Weights.IMAGENET1K_V1))
def regnet_x_16gf(*, weights: Optional[RegNet_X_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_16GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_16GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_16GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_16GF_Weights
:members:
"""
weights = RegNet_X_16GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=22, w_0=216, w_a=55.59, w_m=2.1, group_width=128, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_32GF_Weights.IMAGENET1K_V1))
def regnet_x_32gf(*, weights: Optional[RegNet_X_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_32GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_32GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_32GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_32GF_Weights
:members:
"""
weights = RegNet_X_32GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=23, w_0=320, w_a=69.86, w_m=2.0, group_width=168, **kwargs)
return _regnet(params, weights, progress, **kwargs)
```
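A short inference sketch using one of the builders above together with the metadata carried by its weights enum; the random tensor stands in for a real image, and fetching the checkpoint requires network access:
```py
import torch
from torchvision.models import RegNet_Y_400MF_Weights, regnet_y_400mf

weights = RegNet_Y_400MF_Weights.DEFAULT            # resolves to IMAGENET1K_V2
model = regnet_y_400mf(weights=weights).eval()
preprocess = weights.transforms()                   # crop_size=224, resize_size=232

img = torch.randint(0, 256, (3, 500, 375), dtype=torch.uint8)  # stand-in for a photo
with torch.inference_mode():
    probs = model(preprocess(img).unsqueeze(0)).softmax(dim=1)

score, idx = probs.max(dim=1)
print(weights.meta["categories"][idx.item()], float(score))
```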
|
====================================================================================================================
SOURCE CODE FILE: resnet.py
LINES: 1
SIZE: 38.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\resnet.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"ResNet",
"ResNet18_Weights",
"ResNet34_Weights",
"ResNet50_Weights",
"ResNet101_Weights",
"ResNet152_Weights",
"ResNeXt50_32X4D_Weights",
"ResNeXt101_32X8D_Weights",
"ResNeXt101_64X4D_Weights",
"Wide_ResNet50_2_Weights",
"Wide_ResNet101_2_Weights",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"resnext101_64x4d",
"wide_resnet50_2",
"wide_resnet101_2",
]
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2),
    # while the original implementation places the stride at the first 1x1 convolution (self.conv1),
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
            # each element in the list indicates whether the stride-2 downsampling of the
            # corresponding stage should be replaced with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}"
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck) and m.bn3.weight is not None:
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(
self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> ResNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = ResNet(block, layers, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
}
class ResNet18_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnet18-f37072fd.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 11689512,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.758,
"acc@5": 89.078,
}
},
"_ops": 1.814,
"_file_size": 44.661,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class ResNet34_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnet34-b627a593.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 21797672,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 73.314,
"acc@5": 91.420,
}
},
"_ops": 3.664,
"_file_size": 83.275,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class ResNet50_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnet50-0676ba61.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 25557032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.130,
"acc@5": 92.862,
}
},
"_ops": 4.089,
"_file_size": 97.781,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/resnet50-11ad3fa6.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 25557032,
"recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.858,
"acc@5": 95.434,
}
},
"_ops": 4.089,
"_file_size": 97.79,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class ResNet101_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnet101-63fe2227.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 44549160,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.374,
"acc@5": 93.546,
}
},
"_ops": 7.801,
"_file_size": 170.511,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/resnet101-cd907fc2.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 44549160,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.886,
"acc@5": 95.780,
}
},
"_ops": 7.801,
"_file_size": 170.53,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class ResNet152_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnet152-394f9c45.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 60192808,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.312,
"acc@5": 94.046,
}
},
"_ops": 11.514,
"_file_size": 230.434,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/resnet152-f82ba261.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 60192808,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.284,
"acc@5": 96.002,
}
},
"_ops": 11.514,
"_file_size": 230.474,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class ResNeXt50_32X4D_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 25028904,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.618,
"acc@5": 93.698,
}
},
"_ops": 4.23,
"_file_size": 95.789,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 25028904,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.198,
"acc@5": 95.340,
}
},
"_ops": 4.23,
"_file_size": 95.833,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class ResNeXt101_32X8D_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 88791336,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.312,
"acc@5": 94.526,
}
},
"_ops": 16.414,
"_file_size": 339.586,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 88791336,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.834,
"acc@5": 96.228,
}
},
"_ops": 16.414,
"_file_size": 339.673,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class ResNeXt101_64X4D_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 83455272,
"recipe": "https://github.com/pytorch/vision/pull/5935",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.246,
"acc@5": 96.454,
}
},
"_ops": 15.46,
"_file_size": 319.318,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class Wide_ResNet50_2_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 68883240,
"recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.468,
"acc@5": 94.086,
}
},
"_ops": 11.398,
"_file_size": 131.82,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 68883240,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.602,
"acc@5": 95.758,
}
},
"_ops": 11.398,
"_file_size": 263.124,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class Wide_ResNet101_2_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 126886696,
"recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.848,
"acc@5": 94.284,
}
},
"_ops": 22.753,
"_file_size": 242.896,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 126886696,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.510,
"acc@5": 96.020,
}
},
"_ops": 22.753,
"_file_size": 484.747,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet18_Weights.IMAGENET1K_V1))
def resnet18(*, weights: Optional[ResNet18_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-18 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
Args:
weights (:class:`~torchvision.models.ResNet18_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNet18_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNet18_Weights
:members:
"""
weights = ResNet18_Weights.verify(weights)
return _resnet(BasicBlock, [2, 2, 2, 2], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet34_Weights.IMAGENET1K_V1))
def resnet34(*, weights: Optional[ResNet34_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-34 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
Args:
weights (:class:`~torchvision.models.ResNet34_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNet34_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNet34_Weights
:members:
"""
weights = ResNet34_Weights.verify(weights)
return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet50_Weights.IMAGENET1K_V1))
def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
.. note::
       TorchVision's Bottleneck block places the stride for downsampling at the second 3x3
       convolution, while the original paper places it at the first 1x1 convolution.
This variant improves the accuracy and is known as `ResNet V1.5
<https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.
Args:
weights (:class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNet50_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNet50_Weights
:members:
"""
weights = ResNet50_Weights.verify(weights)
return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet101_Weights.IMAGENET1K_V1))
def resnet101(*, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
.. note::
       TorchVision's Bottleneck block places the stride for downsampling at the second 3x3
       convolution, while the original paper places it at the first 1x1 convolution.
This variant improves the accuracy and is known as `ResNet V1.5
<https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.
Args:
weights (:class:`~torchvision.models.ResNet101_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNet101_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNet101_Weights
:members:
"""
weights = ResNet101_Weights.verify(weights)
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet152_Weights.IMAGENET1K_V1))
def resnet152(*, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
.. note::
       TorchVision's Bottleneck block places the stride for downsampling at the second 3x3
       convolution, while the original paper places it at the first 1x1 convolution.
This variant improves the accuracy and is known as `ResNet V1.5
<https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.
Args:
weights (:class:`~torchvision.models.ResNet152_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNet152_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNet152_Weights
:members:
"""
weights = ResNet152_Weights.verify(weights)
return _resnet(Bottleneck, [3, 8, 36, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt50_32X4D_Weights.IMAGENET1K_V1))
def resnext50_32x4d(
*, weights: Optional[ResNeXt50_32X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
"""ResNeXt-50 32x4d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.
Args:
weights (:class:`~torchvision.models.ResNeXt50_32X4D_Weights`, optional): The
pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt50_32X4D_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNeXt50_32X4D_Weights
:members:
"""
weights = ResNeXt50_32X4D_Weights.verify(weights)
_ovewrite_named_param(kwargs, "groups", 32)
_ovewrite_named_param(kwargs, "width_per_group", 4)
return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt101_32X8D_Weights.IMAGENET1K_V1))
def resnext101_32x8d(
*, weights: Optional[ResNeXt101_32X8D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
"""ResNeXt-101 32x8d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.
Args:
weights (:class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNeXt101_32X8D_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
:members:
"""
weights = ResNeXt101_32X8D_Weights.verify(weights)
_ovewrite_named_param(kwargs, "groups", 32)
_ovewrite_named_param(kwargs, "width_per_group", 8)
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt101_64X4D_Weights.IMAGENET1K_V1))
def resnext101_64x4d(
*, weights: Optional[ResNeXt101_64X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
"""ResNeXt-101 64x4d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.
Args:
weights (:class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ResNeXt101_64X4D_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
:members:
"""
weights = ResNeXt101_64X4D_Weights.verify(weights)
_ovewrite_named_param(kwargs, "groups", 64)
_ovewrite_named_param(kwargs, "width_per_group", 4)
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", Wide_ResNet50_2_Weights.IMAGENET1K_V1))
def wide_resnet50_2(
*, weights: Optional[Wide_ResNet50_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
"""Wide ResNet-50-2 model from
`Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.
    The model is the same as ResNet except that the number of channels in the bottleneck
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions stays the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, whereas in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
weights (:class:`~torchvision.models.Wide_ResNet50_2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Wide_ResNet50_2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Wide_ResNet50_2_Weights
:members:
"""
weights = Wide_ResNet50_2_Weights.verify(weights)
_ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", Wide_ResNet101_2_Weights.IMAGENET1K_V1))
def wide_resnet101_2(
*, weights: Optional[Wide_ResNet101_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
"""Wide ResNet-101-2 model from
`Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.
    The model is the same as ResNet except that the number of channels in the bottleneck
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions stays the same, e.g. the last block in ResNet-101 has 2048-512-2048
    channels, whereas in Wide ResNet-101-2 it has 2048-1024-2048.
Args:
weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Wide_ResNet101_2_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Wide_ResNet101_2_Weights
:members:
"""
weights = Wide_ResNet101_2_Weights.verify(weights)
_ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
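if __name__ == "__main__":
    # Minimal usage sketch: build an untrained ResNet-50 and push a dummy batch through it.
    # Illustrative only; pretrained weights would instead be requested with
    # resnet50(weights=ResNet50_Weights.DEFAULT), which downloads the checkpoint.
    model = resnet50(weights=None)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])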
```
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\segmentation\__init__.py
ENCODING: utf-8
```py
from .deeplabv3 import *
from .fcn import *
from .lraspp import *
```
|
=================================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\segmentation\_utils.py
ENCODING: utf-8
```py
from collections import OrderedDict
from typing import Dict, Optional
from torch import nn, Tensor
from torch.nn import functional as F
from ...utils import _log_api_usage_once
class _SimpleSegmentationModel(nn.Module):
__constants__ = ["aux_classifier"]
def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
super().__init__()
_log_api_usage_once(self)
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
def forward(self, x: Tensor) -> Dict[str, Tensor]:
input_shape = x.shape[-2:]
# contract: features is a dict of tensors
features = self.backbone(x)
result = OrderedDict()
x = features["out"]
x = self.classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["out"] = x
if self.aux_classifier is not None:
x = features["aux"]
x = self.aux_classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["aux"] = x
return result
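if __name__ == "__main__":
    # Minimal sketch of the backbone contract assumed above: the backbone must return a
    # dict of tensors with an "out" key (and optionally "aux"). The toy backbone and its
    # channel count are made up purely for illustration.
    import torch

    class _ToyBackbone(nn.Module):
        def forward(self, x: Tensor) -> Dict[str, Tensor]:
            # pretend stride-8 features with 16 channels
            return {"out": torch.randn(x.shape[0], 16, x.shape[-2] // 8, x.shape[-1] // 8)}

    model = _SimpleSegmentationModel(_ToyBackbone(), classifier=nn.Conv2d(16, 3, kernel_size=1))
    prediction = model(torch.randn(1, 3, 64, 64))
    print(prediction["out"].shape)  # torch.Size([1, 3, 64, 64])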
```
|
====================================================================================================================================
SOURCE CODE FILE: deeplabv3.py
LINES: 1
SIZE: 15.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\segmentation\deeplabv3.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Optional, Sequence
import torch
from torch import nn
from torch.nn import functional as F
from ...transforms._presets import SemanticSegmentation
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights, MobileNetV3
from ..resnet import ResNet, resnet101, ResNet101_Weights, resnet50, ResNet50_Weights
from ._utils import _SimpleSegmentationModel
from .fcn import FCNHead
__all__ = [
"DeepLabV3",
"DeepLabV3_ResNet50_Weights",
"DeepLabV3_ResNet101_Weights",
"DeepLabV3_MobileNet_V3_Large_Weights",
"deeplabv3_mobilenet_v3_large",
"deeplabv3_resnet50",
"deeplabv3_resnet101",
]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabHead(nn.Sequential):
def __init__(self, in_channels: int, num_classes: int, atrous_rates: Sequence[int] = (12, 24, 36)) -> None:
super().__init__(
ASPP(in_channels, atrous_rates),
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, num_classes, 1),
)
class ASPPConv(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, dilation: int) -> None:
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
]
super().__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int) -> None:
super().__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
size = x.shape[-2:]
for mod in self:
x = mod(x)
return F.interpolate(x, size=size, mode="bilinear", align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels: int, atrous_rates: Sequence[int], out_channels: int = 256) -> None:
super().__init__()
modules = []
modules.append(
nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
)
rates = tuple(atrous_rates)
for rate in rates:
modules.append(ASPPConv(in_channels, out_channels, rate))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
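        # Example: with atrous_rates=(12, 24, 36) (the DeepLabHead default) there are five
        # parallel branches (a 1x1 conv, three dilated 3x3 convs, and a global pooling
        # branch), so the projection below receives 5 * 256 = 1280 input channels.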
self.project = nn.Sequential(
nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
_res = []
for conv in self.convs:
_res.append(conv(x))
res = torch.cat(_res, dim=1)
return self.project(res)
def _deeplabv3_resnet(
backbone: ResNet,
num_classes: int,
aux: Optional[bool],
) -> DeepLabV3:
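    # layer3 of a Bottleneck ResNet outputs 1024 channels and layer4 outputs 2048, which
    # is why the auxiliary FCNHead takes 1024 channels and DeepLabHead takes 2048 below.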
return_layers = {"layer4": "out"}
if aux:
return_layers["layer3"] = "aux"
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
aux_classifier = FCNHead(1024, num_classes) if aux else None
classifier = DeepLabHead(2048, num_classes)
return DeepLabV3(backbone, classifier, aux_classifier)
_COMMON_META = {
"categories": _VOC_CATEGORIES,
"min_size": (1, 1),
"_docs": """
These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC
dataset.
""",
}
class DeepLabV3_ResNet50_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
**_COMMON_META,
"num_params": 42004074,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 66.4,
"pixel_acc": 92.4,
}
},
"_ops": 178.722,
"_file_size": 160.515,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
class DeepLabV3_ResNet101_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
**_COMMON_META,
"num_params": 60996202,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 67.4,
"pixel_acc": 92.4,
}
},
"_ops": 258.743,
"_file_size": 233.217,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
**_COMMON_META,
"num_params": 11029328,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 60.3,
"pixel_acc": 91.2,
}
},
"_ops": 10.452,
"_file_size": 42.301,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
def _deeplabv3_mobilenetv3(
backbone: MobileNetV3,
num_classes: int,
aux: Optional[bool],
) -> DeepLabV3:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
out_pos = stage_indices[-1] # use C5 which has output_stride = 16
out_inplanes = backbone[out_pos].out_channels
aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8
aux_inplanes = backbone[aux_pos].out_channels
return_layers = {str(out_pos): "out"}
if aux:
return_layers[str(aux_pos)] = "aux"
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
aux_classifier = FCNHead(aux_inplanes, num_classes) if aux else None
classifier = DeepLabHead(out_inplanes, num_classes)
return DeepLabV3(backbone, classifier, aux_classifier)
@register_model()
@handle_legacy_interface(
weights=("pretrained", DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet50(
*,
weights: Optional[DeepLabV3_ResNet50_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
aux_loss: Optional[bool] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> DeepLabV3:
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
.. betastatus:: segmentation module
Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.
Args:
weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
aux_loss (bool, optional): If True, it uses an auxiliary loss
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for the
backbone
**kwargs: unused
.. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet50_Weights
:members:
"""
weights = DeepLabV3_ResNet50_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
elif num_classes is None:
num_classes = 21
backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _deeplabv3_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", DeepLabV3_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", ResNet101_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet101(
*,
weights: Optional[DeepLabV3_ResNet101_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
aux_loss: Optional[bool] = None,
weights_backbone: Optional[ResNet101_Weights] = ResNet101_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> DeepLabV3:
"""Constructs a DeepLabV3 model with a ResNet-101 backbone.
.. betastatus:: segmentation module
Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.
Args:
weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
aux_loss (bool, optional): If True, it uses an auxiliary loss
weights_backbone (:class:`~torchvision.models.ResNet101_Weights`, optional): The pretrained weights for the
backbone
**kwargs: unused
.. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet101_Weights
:members:
"""
weights = DeepLabV3_ResNet101_Weights.verify(weights)
weights_backbone = ResNet101_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
elif num_classes is None:
num_classes = 21
backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _deeplabv3_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def deeplabv3_mobilenet_v3_large(
*,
weights: Optional[DeepLabV3_MobileNet_V3_Large_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
aux_loss: Optional[bool] = None,
weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> DeepLabV3:
"""Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.
Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.
Args:
weights (:class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background)
aux_loss (bool, optional): If True, it uses an auxiliary loss
weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained weights
for the backbone
**kwargs: unused
.. autoclass:: torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights
:members:
"""
weights = DeepLabV3_MobileNet_V3_Large_Weights.verify(weights)
weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
elif num_classes is None:
num_classes = 21
backbone = mobilenet_v3_large(weights=weights_backbone, dilated=True)
model = _deeplabv3_mobilenetv3(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
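if __name__ == "__main__":
    # Minimal usage sketch: build an untrained deeplabv3_resnet50 and inspect the output
    # dict contract ("out", plus "aux" when aux_loss is enabled). Illustrative only; the
    # 21 classes and the 224x224 input are arbitrary choices for the demo.
    model = deeplabv3_resnet50(weights=None, weights_backbone=None, num_classes=21, aux_loss=True)
    model.eval()
    with torch.no_grad():
        result = model(torch.randn(1, 3, 224, 224))
    print(result["out"].shape)  # torch.Size([1, 21, 224, 224])
    print(result["aux"].shape)  # torch.Size([1, 21, 224, 224])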
```
|
==============================================================================================================================
SOURCE CODE FILE: fcn.py
LINES: 1
SIZE: 8.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\segmentation\fcn.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Optional
from torch import nn
from ...transforms._presets import SemanticSegmentation
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..resnet import ResNet, resnet101, ResNet101_Weights, resnet50, ResNet50_Weights
from ._utils import _SimpleSegmentationModel
__all__ = ["FCN", "FCN_ResNet50_Weights", "FCN_ResNet101_Weights", "fcn_resnet50", "fcn_resnet101"]
class FCN(_SimpleSegmentationModel):
"""
Implements FCN model from
`"Fully Convolutional Networks for Semantic Segmentation"
<https://arxiv.org/abs/1411.4038>`_.
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class FCNHead(nn.Sequential):
def __init__(self, in_channels: int, channels: int) -> None:
inter_channels = in_channels // 4
layers = [
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(inter_channels, channels, 1),
]
super().__init__(*layers)
_COMMON_META = {
"categories": _VOC_CATEGORIES,
"min_size": (1, 1),
"_docs": """
These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC
dataset.
""",
}
class FCN_ResNet50_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
**_COMMON_META,
"num_params": 35322218,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet50",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 60.5,
"pixel_acc": 91.4,
}
},
"_ops": 152.717,
"_file_size": 135.009,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
class FCN_ResNet101_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
**_COMMON_META,
"num_params": 54314346,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 63.7,
"pixel_acc": 91.9,
}
},
"_ops": 232.738,
"_file_size": 207.711,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
def _fcn_resnet(
backbone: ResNet,
num_classes: int,
aux: Optional[bool],
) -> FCN:
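    # As in the DeepLabV3 variant, layer3 of the dilated ResNet backbone provides the 1024
    # channels for the auxiliary head and layer4 provides the 2048 channels for the main head.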
return_layers = {"layer4": "out"}
if aux:
return_layers["layer3"] = "aux"
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
aux_classifier = FCNHead(1024, num_classes) if aux else None
classifier = FCNHead(2048, num_classes)
return FCN(backbone, classifier, aux_classifier)
@register_model()
@handle_legacy_interface(
weights=("pretrained", FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fcn_resnet50(
*,
weights: Optional[FCN_ResNet50_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
aux_loss: Optional[bool] = None,
weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> FCN:
"""Fully-Convolutional Network model with a ResNet-50 backbone from the `Fully Convolutional
Networks for Semantic Segmentation <https://arxiv.org/abs/1411.4038>`_ paper.
.. betastatus:: segmentation module
Args:
weights (:class:`~torchvision.models.segmentation.FCN_ResNet50_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.FCN_ResNet50_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background).
aux_loss (bool, optional): If True, it uses an auxiliary loss.
weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained
weights for the backbone.
**kwargs: parameters passed to the ``torchvision.models.segmentation.fcn.FCN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.segmentation.FCN_ResNet50_Weights
:members:
"""
weights = FCN_ResNet50_Weights.verify(weights)
weights_backbone = ResNet50_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
elif num_classes is None:
num_classes = 21
backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _fcn_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
@register_model()
@handle_legacy_interface(
weights=("pretrained", FCN_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", ResNet101_Weights.IMAGENET1K_V1),
)
def fcn_resnet101(
*,
weights: Optional[FCN_ResNet101_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
aux_loss: Optional[bool] = None,
weights_backbone: Optional[ResNet101_Weights] = ResNet101_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> FCN:
"""Fully-Convolutional Network model with a ResNet-101 backbone from the `Fully Convolutional
Networks for Semantic Segmentation <https://arxiv.org/abs/1411.4038>`_ paper.
.. betastatus:: segmentation module
Args:
weights (:class:`~torchvision.models.segmentation.FCN_ResNet101_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.FCN_ResNet101_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background).
aux_loss (bool, optional): If True, it uses an auxiliary loss.
weights_backbone (:class:`~torchvision.models.ResNet101_Weights`, optional): The pretrained
weights for the backbone.
**kwargs: parameters passed to the ``torchvision.models.segmentation.fcn.FCN``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py>`_
for more details about this class.
.. autoclass:: torchvision.models.segmentation.FCN_ResNet101_Weights
:members:
"""
weights = FCN_ResNet101_Weights.verify(weights)
weights_backbone = ResNet101_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
elif num_classes is None:
num_classes = 21
backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
model = _fcn_resnet(backbone, num_classes, aux_loss)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
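if __name__ == "__main__":
    # Minimal usage sketch: construct an untrained fcn_resnet50 and check that the "out"
    # prediction is upsampled back to the input resolution. Illustrative only; the class
    # count and input size are arbitrary.
    import torch

    model = fcn_resnet50(weights=None, weights_backbone=None, num_classes=21)
    model.eval()
    with torch.no_grad():
        result = model(torch.randn(1, 3, 320, 320))
    print(result["out"].shape)  # torch.Size([1, 21, 320, 320])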
```
|
=================================================================================================================================
SOURCE CODE FILE: lraspp.py
LINES: 1
SIZE: 7.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\segmentation\lraspp.py
ENCODING: utf-8
```py
from collections import OrderedDict
from functools import partial
from typing import Any, Dict, Optional
from torch import nn, Tensor
from torch.nn import functional as F
from ...transforms._presets import SemanticSegmentation
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights, MobileNetV3
__all__ = ["LRASPP", "LRASPP_MobileNet_V3_Large_Weights", "lraspp_mobilenet_v3_large"]
class LRASPP(nn.Module):
"""
Implements a Lite R-ASPP Network for semantic segmentation from
`"Searching for MobileNetV3"
<https://arxiv.org/abs/1905.02244>`_.
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"high" for the high level feature map and "low" for the low level feature map.
low_channels (int): the number of channels of the low level features.
high_channels (int): the number of channels of the high level features.
num_classes (int, optional): number of output classes of the model (including the background).
inter_channels (int, optional): the number of channels for intermediate computations.
"""
def __init__(
self, backbone: nn.Module, low_channels: int, high_channels: int, num_classes: int, inter_channels: int = 128
) -> None:
super().__init__()
_log_api_usage_once(self)
self.backbone = backbone
self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels)
def forward(self, input: Tensor) -> Dict[str, Tensor]:
features = self.backbone(input)
out = self.classifier(features)
out = F.interpolate(out, size=input.shape[-2:], mode="bilinear", align_corners=False)
result = OrderedDict()
result["out"] = out
return result
class LRASPPHead(nn.Module):
def __init__(self, low_channels: int, high_channels: int, num_classes: int, inter_channels: int) -> None:
super().__init__()
self.cbr = nn.Sequential(
nn.Conv2d(high_channels, inter_channels, 1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
)
self.scale = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(high_channels, inter_channels, 1, bias=False),
nn.Sigmoid(),
)
self.low_classifier = nn.Conv2d(low_channels, num_classes, 1)
self.high_classifier = nn.Conv2d(inter_channels, num_classes, 1)
def forward(self, input: Dict[str, Tensor]) -> Tensor:
low = input["low"]
high = input["high"]
x = self.cbr(high)
s = self.scale(high)
x = x * s
x = F.interpolate(x, size=low.shape[-2:], mode="bilinear", align_corners=False)
return self.low_classifier(low) + self.high_classifier(x)
def _lraspp_mobilenetv3(backbone: MobileNetV3, num_classes: int) -> LRASPP:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
low_pos = stage_indices[-4] # use C2 here which has output_stride = 8
high_pos = stage_indices[-1] # use C5 which has output_stride = 16
low_channels = backbone[low_pos].out_channels
high_channels = backbone[high_pos].out_channels
backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): "low", str(high_pos): "high"})
return LRASPP(backbone, low_channels, high_channels, num_classes)
class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
COCO_WITH_VOC_LABELS_V1 = Weights(
url="https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth",
transforms=partial(SemanticSegmentation, resize_size=520),
meta={
"num_params": 3221538,
"categories": _VOC_CATEGORIES,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#lraspp_mobilenet_v3_large",
"_metrics": {
"COCO-val2017-VOC-labels": {
"miou": 57.9,
"pixel_acc": 91.2,
}
},
"_ops": 2.086,
"_file_size": 12.49,
"_docs": """
These weights were trained on a subset of COCO, using only the 20 categories that are present in the
Pascal VOC dataset.
""",
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
@register_model()
@handle_legacy_interface(
weights=("pretrained", LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1),
weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def lraspp_mobilenet_v3_large(
*,
weights: Optional[LRASPP_MobileNet_V3_Large_Weights] = None,
progress: bool = True,
num_classes: Optional[int] = None,
weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
**kwargs: Any,
) -> LRASPP:
"""Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone from
`Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_ paper.
.. betastatus:: segmentation module
Args:
weights (:class:`~torchvision.models.segmentation.LRASPP_MobileNet_V3_Large_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.segmentation.LRASPP_MobileNet_V3_Large_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
num_classes (int, optional): number of output classes of the model (including the background).
        aux_loss (bool, optional): not supported by this model; passing a truthy value
            raises ``NotImplementedError``.
weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained
weights for the backbone.
**kwargs: parameters passed to the ``torchvision.models.segmentation.LRASPP``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/lraspp.py>`_
for more details about this class.
.. autoclass:: torchvision.models.segmentation.LRASPP_MobileNet_V3_Large_Weights
:members:
"""
if kwargs.pop("aux_loss", False):
raise NotImplementedError("This model does not use auxiliary loss")
weights = LRASPP_MobileNet_V3_Large_Weights.verify(weights)
weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)
if weights is not None:
weights_backbone = None
num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
elif num_classes is None:
num_classes = 21
backbone = mobilenet_v3_large(weights=weights_backbone, dilated=True)
model = _lraspp_mobilenetv3(backbone, num_classes)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
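if __name__ == "__main__":
    # Minimal usage sketch: build an untrained lraspp_mobilenet_v3_large and verify the
    # single "out" head. Illustrative only; the input size and class count are arbitrary.
    import torch

    model = lraspp_mobilenet_v3_large(weights=None, weights_backbone=None, num_classes=21)
    model.eval()
    with torch.no_grad():
        result = model(torch.randn(1, 3, 256, 256))
    print(result["out"].shape)  # torch.Size([1, 21, 256, 256])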
```
|
==========================================================================================================================
SOURCE CODE FILE: shufflenetv2.py
LINES: 1
SIZE: 15.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\shufflenetv2.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"ShuffleNetV2",
"ShuffleNet_V2_X0_5_Weights",
"ShuffleNet_V2_X1_0_Weights",
"ShuffleNet_V2_X1_5_Weights",
"ShuffleNet_V2_X2_0_Weights",
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
]
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
batchsize, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, num_channels, height, width)
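    # Worked example: with groups=2 and 8 channels [a0, a1, a2, a3, b0, b1, b2, b3]
    # (the two concatenated branch outputs), the view/transpose/flatten above reorders
    # the channels to [a0, b0, a1, b1, a2, b2, a3, b3], interleaving the two branches.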
return x
class InvertedResidual(nn.Module):
def __init__(self, inp: int, oup: int, stride: int) -> None:
super().__init__()
if not (1 <= stride <= 3):
raise ValueError("illegal stride value")
self.stride = stride
branch_features = oup // 2
if (self.stride == 1) and (inp != branch_features << 1):
raise ValueError(
f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. If stride == 1 then inp should be equal to oup // 2 << 1."
)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
else:
self.branch1 = nn.Sequential()
self.branch2 = nn.Sequential(
nn.Conv2d(
inp if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(
i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
) -> nn.Conv2d:
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x: Tensor) -> Tensor:
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
class ShuffleNetV2(nn.Module):
def __init__(
self,
stages_repeats: List[int],
stages_out_channels: List[int],
num_classes: int = 1000,
inverted_residual: Callable[..., nn.Module] = InvertedResidual,
) -> None:
super().__init__()
_log_api_usage_once(self)
if len(stages_repeats) != 3:
raise ValueError("expected stages_repeats as list of 3 positive ints")
if len(stages_out_channels) != 5:
raise ValueError("expected stages_out_channels as list of 5 positive ints")
self._stage_out_channels = stages_out_channels
input_channels = 3
output_channels = self._stage_out_channels[0]
self.conv1 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
input_channels = output_channels
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Static annotations for mypy
self.stage2: nn.Sequential
self.stage3: nn.Sequential
self.stage4: nn.Sequential
stage_names = [f"stage{i}" for i in [2, 3, 4]]
for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
seq = [inverted_residual(input_channels, output_channels, 2)]
for i in range(repeats - 1):
seq.append(inverted_residual(output_channels, output_channels, 1))
setattr(self, name, nn.Sequential(*seq))
input_channels = output_channels
output_channels = self._stage_out_channels[-1]
self.conv5 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
self.fc = nn.Linear(output_channels, num_classes)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.maxpool(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.conv5(x)
x = x.mean([2, 3]) # globalpool
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _shufflenetv2(
weights: Optional[WeightsEnum],
progress: bool,
*args: Any,
**kwargs: Any,
) -> ShuffleNetV2:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = ShuffleNetV2(*args, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}
class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/ericsun99/Shufflenet-v2-Pytorch
url="https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 1366792,
"_metrics": {
"ImageNet-1K": {
"acc@1": 60.552,
"acc@5": 81.746,
}
},
"_ops": 0.04,
"_file_size": 5.282,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/ericsun99/Shufflenet-v2-Pytorch
url="https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2278604,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.362,
"acc@5": 88.316,
}
},
"_ops": 0.145,
"_file_size": 8.791,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 3503624,
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.996,
"acc@5": 91.086,
}
},
"_ops": 0.296,
"_file_size": 13.557,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 7393996,
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.230,
"acc@5": 93.006,
}
},
"_ops": 0.583,
"_file_size": 28.433,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x0_5(
*, weights: Optional[ShuffleNet_V2_X0_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
:members:
"""
weights = ShuffleNet_V2_X0_5_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
*, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
:members:
"""
weights = ShuffleNet_V2_X1_0_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
*, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
:members:
"""
weights = ShuffleNet_V2_X1_5_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
*, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
:members:
"""
weights = ShuffleNet_V2_X2_0_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
```
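A minimal inference sketch for the builders above, assuming torchvision is importable and the checkpoint referenced by the chosen weights enum can be downloaded; the random tensor stands in for a real image:
```py
import torch
from torchvision.models import shufflenet_v2_x1_0, ShuffleNet_V2_X1_0_Weights

# Build the 1.0x model with its ImageNet-1K weights and the matching preprocessing.
weights = ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1
model = shufflenet_v2_x1_0(weights=weights).eval()
preprocess = weights.transforms()

img = torch.rand(3, 256, 256)  # placeholder for a real CHW image tensor
batch = preprocess(img).unsqueeze(0)
with torch.no_grad():
    logits = model(batch)
print(weights.meta["categories"][logits.argmax(dim=1).item()])
```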
|
========================================================================================================================
SOURCE CODE FILE: squeezenet.py
LINES: 1
SIZE: 8.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\squeezenet.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
import torch.nn.init as init
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["SqueezeNet", "SqueezeNet1_0_Weights", "SqueezeNet1_1_Weights", "squeezenet1_0", "squeezenet1_1"]
class Fire(nn.Module):
def __init__(self, inplanes: int, squeeze_planes: int, expand1x1_planes: int, expand3x3_planes: int) -> None:
super().__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.squeeze_activation(self.squeeze(x))
return torch.cat(
[self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1
)
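# Example: Fire(96, 16, 64, 64) squeezes 96 input channels down to 16 with a 1x1
# conv, then expands in parallel to 64 (1x1) + 64 (3x3) = 128 output channels, so
# a (N, 96, H, W) input becomes (N, 128, H, W).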
class SqueezeNet(nn.Module):
def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.num_classes = num_classes
if version == "1_0":
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
elif version == "1_1":
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
else:
# FIXME: Is this needed? SqueezeNet should only be called from the
# FIXME: squeezenet1_x() functions
# FIXME: This checking is not done for the other models
raise ValueError(f"Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected")
# Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout), final_conv, nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1))
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.classifier(x)
return torch.flatten(x, 1)
def _squeezenet(
version: str,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> SqueezeNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = SqueezeNet(version, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/pull/49#issuecomment-277560717",
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
}
class SqueezeNet1_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"min_size": (21, 21),
"num_params": 1248424,
"_metrics": {
"ImageNet-1K": {
"acc@1": 58.092,
"acc@5": 80.420,
}
},
"_ops": 0.819,
"_file_size": 4.778,
},
)
DEFAULT = IMAGENET1K_V1
class SqueezeNet1_1_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"min_size": (17, 17),
"num_params": 1235496,
"_metrics": {
"ImageNet-1K": {
"acc@1": 58.178,
"acc@5": 80.624,
}
},
"_ops": 0.349,
"_file_size": 4.729,
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", SqueezeNet1_0_Weights.IMAGENET1K_V1))
def squeezenet1_0(
*, weights: Optional[SqueezeNet1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> SqueezeNet:
"""SqueezeNet model architecture from the `SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size
<https://arxiv.org/abs/1602.07360>`_ paper.
Args:
weights (:class:`~torchvision.models.SqueezeNet1_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.SqueezeNet1_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.squeezenet.SqueezeNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.SqueezeNet1_0_Weights
:members:
"""
weights = SqueezeNet1_0_Weights.verify(weights)
return _squeezenet("1_0", weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", SqueezeNet1_1_Weights.IMAGENET1K_V1))
def squeezenet1_1(
*, weights: Optional[SqueezeNet1_1_Weights] = None, progress: bool = True, **kwargs: Any
) -> SqueezeNet:
"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
weights (:class:`~torchvision.models.SqueezeNet1_1_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.SqueezeNet1_1_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.squeezenet.SqueezeNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.SqueezeNet1_1_Weights
:members:
"""
weights = SqueezeNet1_1_Weights.verify(weights)
return _squeezenet("1_1", weights, progress, **kwargs)
```
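A similar usage sketch for SqueezeNet 1.1, under the same assumption that the listed checkpoint can be downloaded:
```py
import torch
from torchvision.models import squeezenet1_1, SqueezeNet1_1_Weights

weights = SqueezeNet1_1_Weights.IMAGENET1K_V1
model = squeezenet1_1(weights=weights).eval()

# The classifier ends in a 1x1 conv over the class channels followed by global
# average pooling, so the flattened output is already (N, 1000).
batch = weights.transforms()(torch.rand(3, 224, 224)).unsqueeze(0)
with torch.no_grad():
    probs = model(batch).softmax(dim=1)
top5 = probs.topk(5, dim=1)
print([weights.meta["categories"][i] for i in top5.indices[0].tolist()])
```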
|
==============================================================================================================================
SOURCE CODE FILE: swin_transformer.py
LINES: 1
SIZE: 39.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\swin_transformer.py
ENCODING: utf-8
```py
import math
from functools import partial
from typing import Any, Callable, List, Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from ..ops.misc import MLP, Permute
from ..ops.stochastic_depth import StochasticDepth
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"SwinTransformer",
"Swin_T_Weights",
"Swin_S_Weights",
"Swin_B_Weights",
"Swin_V2_T_Weights",
"Swin_V2_S_Weights",
"Swin_V2_B_Weights",
"swin_t",
"swin_s",
"swin_b",
"swin_v2_t",
"swin_v2_s",
"swin_v2_b",
]
def _patch_merging_pad(x: torch.Tensor) -> torch.Tensor:
H, W, _ = x.shape[-3:]
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[..., 0::2, 0::2, :] # ... H/2 W/2 C
x1 = x[..., 1::2, 0::2, :] # ... H/2 W/2 C
x2 = x[..., 0::2, 1::2, :] # ... H/2 W/2 C
x3 = x[..., 1::2, 1::2, :] # ... H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # ... H/2 W/2 4*C
return x
torch.fx.wrap("_patch_merging_pad")
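# Example: a (1, 7, 7, 96) input is zero-padded to an even 8x8 grid, the four 2x2
# sub-grids are gathered and concatenated on the channel dim, giving (1, 4, 4, 384).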
def _get_relative_position_bias(
relative_position_bias_table: torch.Tensor, relative_position_index: torch.Tensor, window_size: List[int]
) -> torch.Tensor:
N = window_size[0] * window_size[1]
relative_position_bias = relative_position_bias_table[relative_position_index] # type: ignore[index]
relative_position_bias = relative_position_bias.view(N, N, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(0)
return relative_position_bias
torch.fx.wrap("_get_relative_position_bias")
class PatchMerging(nn.Module):
"""Patch Merging Layer.
Args:
dim (int): Number of input channels.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
"""
def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm):
super().__init__()
_log_api_usage_once(self)
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x: Tensor):
"""
Args:
x (Tensor): input tensor with expected layout of [..., H, W, C]
Returns:
Tensor with layout of [..., H/2, W/2, 2*C]
"""
x = _patch_merging_pad(x)
x = self.norm(x)
x = self.reduction(x) # ... H/2 W/2 2*C
return x
class PatchMergingV2(nn.Module):
"""Patch Merging Layer for Swin Transformer V2.
Args:
dim (int): Number of input channels.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
"""
def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm):
super().__init__()
_log_api_usage_once(self)
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(2 * dim) # difference
def forward(self, x: Tensor):
"""
Args:
x (Tensor): input tensor with expected layout of [..., H, W, C]
Returns:
Tensor with layout of [..., H/2, W/2, 2*C]
"""
x = _patch_merging_pad(x)
x = self.reduction(x) # ... H/2 W/2 2*C
x = self.norm(x)
return x
def shifted_window_attention(
input: Tensor,
qkv_weight: Tensor,
proj_weight: Tensor,
relative_position_bias: Tensor,
window_size: List[int],
num_heads: int,
shift_size: List[int],
attention_dropout: float = 0.0,
dropout: float = 0.0,
qkv_bias: Optional[Tensor] = None,
proj_bias: Optional[Tensor] = None,
logit_scale: Optional[torch.Tensor] = None,
training: bool = True,
) -> Tensor:
"""
Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
        input (Tensor[N, H, W, C]): The input tensor of 4 dimensions.
qkv_weight (Tensor[in_dim, out_dim]): The weight tensor of query, key, value.
proj_weight (Tensor[out_dim, out_dim]): The weight tensor of projection.
relative_position_bias (Tensor): The learned relative position bias added to attention.
window_size (List[int]): Window size.
num_heads (int): Number of attention heads.
shift_size (List[int]): Shift size for shifted window attention.
attention_dropout (float): Dropout ratio of attention weight. Default: 0.0.
dropout (float): Dropout ratio of output. Default: 0.0.
qkv_bias (Tensor[out_dim], optional): The bias tensor of query, key, value. Default: None.
proj_bias (Tensor[out_dim], optional): The bias tensor of projection. Default: None.
logit_scale (Tensor[out_dim], optional): Logit scale of cosine attention for Swin Transformer V2. Default: None.
training (bool, optional): Training flag used by the dropout parameters. Default: True.
Returns:
Tensor[N, H, W, C]: The output tensor after shifted window attention.
"""
B, H, W, C = input.shape
# pad feature maps to multiples of window size
pad_r = (window_size[1] - W % window_size[1]) % window_size[1]
pad_b = (window_size[0] - H % window_size[0]) % window_size[0]
x = F.pad(input, (0, 0, 0, pad_r, 0, pad_b))
_, pad_H, pad_W, _ = x.shape
shift_size = shift_size.copy()
# If window size is larger than feature size, there is no need to shift window
if window_size[0] >= pad_H:
shift_size[0] = 0
if window_size[1] >= pad_W:
shift_size[1] = 0
# cyclic shift
if sum(shift_size) > 0:
x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2))
# partition windows
num_windows = (pad_H // window_size[0]) * (pad_W // window_size[1])
x = x.view(B, pad_H // window_size[0], window_size[0], pad_W // window_size[1], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).reshape(B * num_windows, window_size[0] * window_size[1], C) # B*nW, Ws*Ws, C
# multi-head attention
if logit_scale is not None and qkv_bias is not None:
qkv_bias = qkv_bias.clone()
length = qkv_bias.numel() // 3
qkv_bias[length : 2 * length].zero_()
qkv = F.linear(x, qkv_weight, qkv_bias)
qkv = qkv.reshape(x.size(0), x.size(1), 3, num_heads, C // num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
if logit_scale is not None:
# cosine attention
attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(logit_scale, max=math.log(100.0)).exp()
attn = attn * logit_scale
else:
q = q * (C // num_heads) ** -0.5
attn = q.matmul(k.transpose(-2, -1))
# add relative position bias
attn = attn + relative_position_bias
if sum(shift_size) > 0:
# generate attention mask
attn_mask = x.new_zeros((pad_H, pad_W))
h_slices = ((0, -window_size[0]), (-window_size[0], -shift_size[0]), (-shift_size[0], None))
w_slices = ((0, -window_size[1]), (-window_size[1], -shift_size[1]), (-shift_size[1], None))
count = 0
for h in h_slices:
for w in w_slices:
attn_mask[h[0] : h[1], w[0] : w[1]] = count
count += 1
attn_mask = attn_mask.view(pad_H // window_size[0], window_size[0], pad_W // window_size[1], window_size[1])
attn_mask = attn_mask.permute(0, 2, 1, 3).reshape(num_windows, window_size[0] * window_size[1])
attn_mask = attn_mask.unsqueeze(1) - attn_mask.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
attn = attn.view(x.size(0) // num_windows, num_windows, num_heads, x.size(1), x.size(1))
attn = attn + attn_mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, num_heads, x.size(1), x.size(1))
attn = F.softmax(attn, dim=-1)
attn = F.dropout(attn, p=attention_dropout, training=training)
x = attn.matmul(v).transpose(1, 2).reshape(x.size(0), x.size(1), C)
x = F.linear(x, proj_weight, proj_bias)
x = F.dropout(x, p=dropout, training=training)
# reverse windows
x = x.view(B, pad_H // window_size[0], pad_W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, pad_H, pad_W, C)
# reverse cyclic shift
if sum(shift_size) > 0:
x = torch.roll(x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2))
# unpad features
x = x[:, :H, :W, :].contiguous()
return x
torch.fx.wrap("shifted_window_attention")
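# Example: for a 56x56 feature map with window_size=[7, 7] the input is split into
# 64 non-overlapping windows of 7*7 = 49 tokens and attention runs independently
# per window; for shifted blocks the attention mask prevents tokens rolled in from
# the opposite border from attending to their new neighbours.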
class ShiftedWindowAttention(nn.Module):
"""
See :func:`shifted_window_attention`.
"""
def __init__(
self,
dim: int,
window_size: List[int],
shift_size: List[int],
num_heads: int,
qkv_bias: bool = True,
proj_bias: bool = True,
attention_dropout: float = 0.0,
dropout: float = 0.0,
):
super().__init__()
if len(window_size) != 2 or len(shift_size) != 2:
raise ValueError("window_size and shift_size must be of length 2")
self.window_size = window_size
self.shift_size = shift_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
self.dropout = dropout
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim, bias=proj_bias)
self.define_relative_position_bias_table()
self.define_relative_position_index()
def define_relative_position_bias_table(self):
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), self.num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)
def define_relative_position_index(self):
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1).flatten() # Wh*Ww*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
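    # Example: for window_size=[7, 7] the bias table has shape (13 * 13, num_heads)
    # = (169, num_heads) and relative_position_index holds 49 * 49 = 2401 entries,
    # one per (query, key) pair inside a window.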
def get_relative_position_bias(self) -> torch.Tensor:
return _get_relative_position_bias(
self.relative_position_bias_table, self.relative_position_index, self.window_size # type: ignore[arg-type]
)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Tensor with layout of [B, H, W, C]
Returns:
Tensor with same layout as input, i.e. [B, H, W, C]
"""
relative_position_bias = self.get_relative_position_bias()
return shifted_window_attention(
x,
self.qkv.weight,
self.proj.weight,
relative_position_bias,
self.window_size,
self.num_heads,
shift_size=self.shift_size,
attention_dropout=self.attention_dropout,
dropout=self.dropout,
qkv_bias=self.qkv.bias,
proj_bias=self.proj.bias,
training=self.training,
)
class ShiftedWindowAttentionV2(ShiftedWindowAttention):
"""
See :func:`shifted_window_attention_v2`.
"""
def __init__(
self,
dim: int,
window_size: List[int],
shift_size: List[int],
num_heads: int,
qkv_bias: bool = True,
proj_bias: bool = True,
attention_dropout: float = 0.0,
dropout: float = 0.0,
):
super().__init__(
dim,
window_size,
shift_size,
num_heads,
qkv_bias=qkv_bias,
proj_bias=proj_bias,
attention_dropout=attention_dropout,
dropout=dropout,
)
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
# mlp to generate continuous relative position bias
self.cpb_mlp = nn.Sequential(
nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)
)
if qkv_bias:
length = self.qkv.bias.numel() // 3
self.qkv.bias[length : 2 * length].data.zero_()
def define_relative_position_bias_table(self):
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
relative_coords_table = torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w], indexing="ij"))
relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = (
torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / 3.0
)
self.register_buffer("relative_coords_table", relative_coords_table)
def get_relative_position_bias(self) -> torch.Tensor:
relative_position_bias = _get_relative_position_bias(
self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads),
self.relative_position_index, # type: ignore[arg-type]
self.window_size,
)
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
return relative_position_bias
def forward(self, x: Tensor):
"""
Args:
x (Tensor): Tensor with layout of [B, H, W, C]
Returns:
Tensor with same layout as input, i.e. [B, H, W, C]
"""
relative_position_bias = self.get_relative_position_bias()
return shifted_window_attention(
x,
self.qkv.weight,
self.proj.weight,
relative_position_bias,
self.window_size,
self.num_heads,
shift_size=self.shift_size,
attention_dropout=self.attention_dropout,
dropout=self.dropout,
qkv_bias=self.qkv.bias,
proj_bias=self.proj.bias,
logit_scale=self.logit_scale,
training=self.training,
)
class SwinTransformerBlock(nn.Module):
"""
Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (List[int]): Window size.
shift_size (List[int]): Shift size for shifted window attention.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0.
dropout (float): Dropout rate. Default: 0.0.
attention_dropout (float): Attention dropout rate. Default: 0.0.
        stochastic_depth_prob (float): Stochastic depth rate. Default: 0.0.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
attn_layer (nn.Module): Attention layer. Default: ShiftedWindowAttention
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: List[int],
shift_size: List[int],
mlp_ratio: float = 4.0,
dropout: float = 0.0,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.0,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_layer: Callable[..., nn.Module] = ShiftedWindowAttention,
):
super().__init__()
_log_api_usage_once(self)
self.norm1 = norm_layer(dim)
self.attn = attn_layer(
dim,
window_size,
shift_size,
num_heads,
attention_dropout=attention_dropout,
dropout=dropout,
)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
self.norm2 = norm_layer(dim)
self.mlp = MLP(dim, [int(dim * mlp_ratio), dim], activation_layer=nn.GELU, inplace=None, dropout=dropout)
for m in self.mlp.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.normal_(m.bias, std=1e-6)
def forward(self, x: Tensor):
x = x + self.stochastic_depth(self.attn(self.norm1(x)))
x = x + self.stochastic_depth(self.mlp(self.norm2(x)))
return x
class SwinTransformerBlockV2(SwinTransformerBlock):
"""
Swin Transformer V2 Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (List[int]): Window size.
shift_size (List[int]): Shift size for shifted window attention.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0.
dropout (float): Dropout rate. Default: 0.0.
attention_dropout (float): Attention dropout rate. Default: 0.0.
        stochastic_depth_prob (float): Stochastic depth rate. Default: 0.0.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
attn_layer (nn.Module): Attention layer. Default: ShiftedWindowAttentionV2.
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: List[int],
shift_size: List[int],
mlp_ratio: float = 4.0,
dropout: float = 0.0,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.0,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_layer: Callable[..., nn.Module] = ShiftedWindowAttentionV2,
):
super().__init__(
dim,
num_heads,
window_size,
shift_size,
mlp_ratio=mlp_ratio,
dropout=dropout,
attention_dropout=attention_dropout,
stochastic_depth_prob=stochastic_depth_prob,
norm_layer=norm_layer,
attn_layer=attn_layer,
)
def forward(self, x: Tensor):
# Here is the difference, we apply norm after the attention in V2.
# In V1 we applied norm before the attention.
x = x + self.stochastic_depth(self.norm1(self.attn(x)))
x = x + self.stochastic_depth(self.norm2(self.mlp(x)))
return x
class SwinTransformer(nn.Module):
"""
Implements Swin Transformer from the `"Swin Transformer: Hierarchical Vision Transformer using
Shifted Windows" <https://arxiv.org/abs/2103.14030>`_ paper.
Args:
patch_size (List[int]): Patch size.
embed_dim (int): Patch embedding dimension.
        depths (List[int]): Depth of each Swin Transformer layer.
        num_heads (List[int]): Number of attention heads in different layers.
window_size (List[int]): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0.
dropout (float): Dropout rate. Default: 0.0.
attention_dropout (float): Attention dropout rate. Default: 0.0.
stochastic_depth_prob (float): Stochastic depth rate. Default: 0.1.
num_classes (int): Number of classes for classification head. Default: 1000.
block (nn.Module, optional): SwinTransformer Block. Default: None.
norm_layer (nn.Module, optional): Normalization layer. Default: None.
downsample_layer (nn.Module): Downsample layer (patch merging). Default: PatchMerging.
"""
def __init__(
self,
patch_size: List[int],
embed_dim: int,
depths: List[int],
num_heads: List[int],
window_size: List[int],
mlp_ratio: float = 4.0,
dropout: float = 0.0,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.1,
num_classes: int = 1000,
norm_layer: Optional[Callable[..., nn.Module]] = None,
block: Optional[Callable[..., nn.Module]] = None,
downsample_layer: Callable[..., nn.Module] = PatchMerging,
):
super().__init__()
_log_api_usage_once(self)
self.num_classes = num_classes
if block is None:
block = SwinTransformerBlock
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-5)
layers: List[nn.Module] = []
# split image into non-overlapping patches
layers.append(
nn.Sequential(
nn.Conv2d(
3, embed_dim, kernel_size=(patch_size[0], patch_size[1]), stride=(patch_size[0], patch_size[1])
),
Permute([0, 2, 3, 1]),
norm_layer(embed_dim),
)
)
total_stage_blocks = sum(depths)
stage_block_id = 0
# build SwinTransformer blocks
for i_stage in range(len(depths)):
stage: List[nn.Module] = []
dim = embed_dim * 2**i_stage
for i_layer in range(depths[i_stage]):
# adjust stochastic depth probability based on the depth of the stage block
sd_prob = stochastic_depth_prob * float(stage_block_id) / (total_stage_blocks - 1)
stage.append(
block(
dim,
num_heads[i_stage],
window_size=window_size,
shift_size=[0 if i_layer % 2 == 0 else w // 2 for w in window_size],
mlp_ratio=mlp_ratio,
dropout=dropout,
attention_dropout=attention_dropout,
stochastic_depth_prob=sd_prob,
norm_layer=norm_layer,
)
)
stage_block_id += 1
layers.append(nn.Sequential(*stage))
# add patch merging layer
if i_stage < (len(depths) - 1):
layers.append(downsample_layer(dim, norm_layer))
self.features = nn.Sequential(*layers)
num_features = embed_dim * 2 ** (len(depths) - 1)
self.norm = norm_layer(num_features)
self.permute = Permute([0, 3, 1, 2]) # B H W C -> B C H W
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.flatten = nn.Flatten(1)
self.head = nn.Linear(num_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = self.norm(x)
x = self.permute(x)
x = self.avgpool(x)
x = self.flatten(x)
x = self.head(x)
return x
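# Example shapes for swin_t on a 224x224 input: the patch embedding yields a
# (N, 56, 56, 96) feature map, the three PatchMerging steps reduce it to
# (N, 7, 7, 768), and avgpool + head map that to (N, 1000) logits.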
def _swin_transformer(
patch_size: List[int],
embed_dim: int,
depths: List[int],
num_heads: List[int],
window_size: List[int],
stochastic_depth_prob: float,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> SwinTransformer:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = SwinTransformer(
patch_size=patch_size,
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
stochastic_depth_prob=stochastic_depth_prob,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"categories": _IMAGENET_CATEGORIES,
}
class Swin_T_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_t-704ceda3.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=232, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 28288354,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.474,
"acc@5": 95.776,
}
},
"_ops": 4.491,
"_file_size": 108.19,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class Swin_S_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_s-5e29d889.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=246, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 49606258,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.196,
"acc@5": 96.360,
}
},
"_ops": 8.741,
"_file_size": 189.786,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class Swin_B_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_b-68c6b09e.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=238, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 87768224,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.582,
"acc@5": 96.640,
}
},
"_ops": 15.431,
"_file_size": 335.364,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class Swin_V2_T_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_v2_t-b137f0e2.pth",
transforms=partial(
ImageClassification, crop_size=256, resize_size=260, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 28351570,
"min_size": (256, 256),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.072,
"acc@5": 96.132,
}
},
"_ops": 5.94,
"_file_size": 108.626,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class Swin_V2_S_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_v2_s-637d8ceb.pth",
transforms=partial(
ImageClassification, crop_size=256, resize_size=260, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 49737442,
"min_size": (256, 256),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.712,
"acc@5": 96.816,
}
},
"_ops": 11.546,
"_file_size": 190.675,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
class Swin_V2_B_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/swin_v2_b-781e5279.pth",
transforms=partial(
ImageClassification, crop_size=256, resize_size=272, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_META,
"num_params": 87930848,
"min_size": (256, 256),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2",
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.112,
"acc@5": 96.864,
}
},
"_ops": 20.325,
"_file_size": 336.372,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_T_Weights.IMAGENET1K_V1))
def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_tiny architecture from
`Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
Args:
weights (:class:`~torchvision.models.Swin_T_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_T_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_T_Weights
:members:
"""
weights = Swin_T_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=[7, 7],
stochastic_depth_prob=0.2,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_S_Weights.IMAGENET1K_V1))
def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_small architecture from
`Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
Args:
weights (:class:`~torchvision.models.Swin_S_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_S_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_S_Weights
:members:
"""
weights = Swin_S_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=[7, 7],
stochastic_depth_prob=0.3,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_B_Weights.IMAGENET1K_V1))
def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_base architecture from
`Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
Args:
weights (:class:`~torchvision.models.Swin_B_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_B_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_B_Weights
:members:
"""
weights = Swin_B_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=[7, 7],
stochastic_depth_prob=0.5,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_T_Weights.IMAGENET1K_V1))
def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_tiny architecture from
`Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
Args:
weights (:class:`~torchvision.models.Swin_V2_T_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_V2_T_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_V2_T_Weights
:members:
"""
weights = Swin_V2_T_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 8],
stochastic_depth_prob=0.2,
weights=weights,
progress=progress,
block=SwinTransformerBlockV2,
downsample_layer=PatchMergingV2,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_S_Weights.IMAGENET1K_V1))
def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_small architecture from
`Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
Args:
weights (:class:`~torchvision.models.Swin_V2_S_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_V2_S_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_V2_S_Weights
:members:
"""
weights = Swin_V2_S_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 8],
stochastic_depth_prob=0.3,
weights=weights,
progress=progress,
block=SwinTransformerBlockV2,
downsample_layer=PatchMergingV2,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_B_Weights.IMAGENET1K_V1))
def swin_v2_b(*, weights: Optional[Swin_V2_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_base architecture from
`Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
Args:
weights (:class:`~torchvision.models.Swin_V2_B_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.Swin_V2_B_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.Swin_V2_B_Weights
:members:
"""
weights = Swin_V2_B_Weights.verify(weights)
return _swin_transformer(
patch_size=[4, 4],
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=[8, 8],
stochastic_depth_prob=0.5,
weights=weights,
progress=progress,
block=SwinTransformerBlockV2,
downsample_layer=PatchMergingV2,
**kwargs,
)
```
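A minimal sketch for the Swin builders, with the same caveat that the listed checkpoint must be downloadable:
```py
import torch
from torchvision.models import swin_t, Swin_T_Weights

weights = Swin_T_Weights.IMAGENET1K_V1
model = swin_t(weights=weights).eval()

# swin_t uses 4x4 patches and 7x7 windows, so the 224x224 crop produced by the
# preset transforms becomes a 56x56 token grid that is merged down to 7x7.
batch = weights.transforms()(torch.rand(3, 240, 240)).unsqueeze(0)
with torch.no_grad():
    scores = model(batch)
print(scores.shape)  # torch.Size([1, 1000])
```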
|
=================================================================================================================
SOURCE CODE FILE: vgg.py
LINES: 1
SIZE: 19.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\vgg.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, cast, Dict, List, Optional, Union
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"VGG",
"VGG11_Weights",
"VGG11_BN_Weights",
"VGG13_Weights",
"VGG13_BN_Weights",
"VGG16_Weights",
"VGG16_BN_Weights",
"VGG19_Weights",
"VGG19_BN_Weights",
"vgg11",
"vgg11_bn",
"vgg13",
"vgg13_bn",
"vgg16",
"vgg16_bn",
"vgg19",
"vgg19_bn",
]
class VGG(nn.Module):
def __init__(
self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(p=dropout),
nn.Linear(4096, num_classes),
)
if init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs: Dict[str, List[Union[str, int]]] = {
"A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
"B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
"D": [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"],
"E": [64, 64, "M", 128, 128, "M", 256, 256, 256, 256, "M", 512, 512, 512, 512, "M", 512, 512, 512, 512, "M"],
}
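# Example: cfgs["A"] builds the VGG-11 feature extractor: 8 convolutional layers
# (64, 128, 2x256, 4x512) interleaved with 5 max-pool stages, which together with
# the 3 fully connected layers in the classifier give the 11 weight layers.
# Configs "B", "D" and "E" correspond to VGG-13, VGG-16 and VGG-19 in the same way.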
def _vgg(cfg: str, batch_norm: bool, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> VGG:
if weights is not None:
kwargs["init_weights"] = False
if weights.meta["categories"] is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (32, 32),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_docs": """These weights were trained from scratch by using a simplified training recipe.""",
}
class VGG11_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg11-8a719046.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 132863336,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.020,
"acc@5": 88.628,
}
},
"_ops": 7.609,
"_file_size": 506.84,
},
)
DEFAULT = IMAGENET1K_V1
class VGG11_BN_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg11_bn-6002323d.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 132868840,
"_metrics": {
"ImageNet-1K": {
"acc@1": 70.370,
"acc@5": 89.810,
}
},
"_ops": 7.609,
"_file_size": 506.881,
},
)
DEFAULT = IMAGENET1K_V1
class VGG13_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg13-19584684.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 133047848,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.928,
"acc@5": 89.246,
}
},
"_ops": 11.308,
"_file_size": 507.545,
},
)
DEFAULT = IMAGENET1K_V1
class VGG13_BN_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg13_bn-abd245e5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 133053736,
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.586,
"acc@5": 90.374,
}
},
"_ops": 11.308,
"_file_size": 507.59,
},
)
DEFAULT = IMAGENET1K_V1
class VGG16_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg16-397923af.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 138357544,
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.592,
"acc@5": 90.382,
}
},
"_ops": 15.47,
"_file_size": 527.796,
},
)
IMAGENET1K_FEATURES = Weights(
# Weights ported from https://github.com/amdegroot/ssd.pytorch/
url="https://download.pytorch.org/models/vgg16_features-amdegroot-88682ab5.pth",
transforms=partial(
ImageClassification,
crop_size=224,
mean=(0.48235, 0.45882, 0.40784),
std=(1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0),
),
meta={
**_COMMON_META,
"num_params": 138357544,
"categories": None,
"recipe": "https://github.com/amdegroot/ssd.pytorch#training-ssd",
"_metrics": {
"ImageNet-1K": {
"acc@1": float("nan"),
"acc@5": float("nan"),
}
},
"_ops": 15.47,
"_file_size": 527.802,
"_docs": """
These weights can't be used for classification because they are missing values in the `classifier`
module. Only the `features` module has valid values and can be used for feature extraction. The weights
were trained using the original input standardization method as described in the paper.
""",
},
)
DEFAULT = IMAGENET1K_V1
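# Note: per its `_docs` entry, IMAGENET1K_FEATURES is meant for feature extraction
# only; a typical use is taking `vgg16(weights=VGG16_Weights.IMAGENET1K_FEATURES).features`
# as a backbone (e.g. for SSD-style detectors) rather than running the classifier head.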
class VGG16_BN_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg16_bn-6c64b313.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 138365992,
"_metrics": {
"ImageNet-1K": {
"acc@1": 73.360,
"acc@5": 91.516,
}
},
"_ops": 15.47,
"_file_size": 527.866,
},
)
DEFAULT = IMAGENET1K_V1
class VGG19_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg19-dcbb9e9d.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 143667240,
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.376,
"acc@5": 90.876,
}
},
"_ops": 19.632,
"_file_size": 548.051,
},
)
DEFAULT = IMAGENET1K_V1
class VGG19_BN_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vgg19_bn-c79401a0.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 143678248,
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.218,
"acc@5": 91.842,
}
},
"_ops": 19.632,
"_file_size": 548.143,
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG11_Weights.IMAGENET1K_V1))
def vgg11(*, weights: Optional[VGG11_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-11 from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG11_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG11_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG11_Weights
:members:
"""
weights = VGG11_Weights.verify(weights)
return _vgg("A", False, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG11_BN_Weights.IMAGENET1K_V1))
def vgg11_bn(*, weights: Optional[VGG11_BN_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-11-BN from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG11_BN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG11_BN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG11_BN_Weights
:members:
"""
weights = VGG11_BN_Weights.verify(weights)
return _vgg("A", True, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG13_Weights.IMAGENET1K_V1))
def vgg13(*, weights: Optional[VGG13_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-13 from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG13_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG13_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG13_Weights
:members:
"""
weights = VGG13_Weights.verify(weights)
return _vgg("B", False, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG13_BN_Weights.IMAGENET1K_V1))
def vgg13_bn(*, weights: Optional[VGG13_BN_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-13-BN from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG13_BN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG13_BN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG13_BN_Weights
:members:
"""
weights = VGG13_BN_Weights.verify(weights)
return _vgg("B", True, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG16_Weights.IMAGENET1K_V1))
def vgg16(*, weights: Optional[VGG16_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-16 from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG16_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG16_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG16_Weights
:members:
"""
weights = VGG16_Weights.verify(weights)
return _vgg("D", False, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG16_BN_Weights.IMAGENET1K_V1))
def vgg16_bn(*, weights: Optional[VGG16_BN_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-16-BN from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG16_BN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG16_BN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG16_BN_Weights
:members:
"""
weights = VGG16_BN_Weights.verify(weights)
return _vgg("D", True, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG19_Weights.IMAGENET1K_V1))
def vgg19(*, weights: Optional[VGG19_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-19 from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG19_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG19_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG19_Weights
:members:
"""
weights = VGG19_Weights.verify(weights)
return _vgg("E", False, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", VGG19_BN_Weights.IMAGENET1K_V1))
def vgg19_bn(*, weights: Optional[VGG19_BN_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG:
"""VGG-19_BN from `Very Deep Convolutional Networks for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`__.
Args:
weights (:class:`~torchvision.models.VGG19_BN_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.VGG19_BN_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vgg.VGG``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vgg.py>`_
for more details about this class.
.. autoclass:: torchvision.models.VGG19_BN_Weights
:members:
"""
weights = VGG19_BN_Weights.verify(weights)
return _vgg("E", True, weights, progress, **kwargs)
```
|
============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\video\__init__.py
ENCODING: utf-8
```py
from .mvit import *
from .resnet import *
from .s3d import *
from .swin_transformer import *
```
|
========================================================================================================================
SOURCE CODE FILE: mvit.py
LINES: 1
SIZE: 33.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\video\mvit.py
ENCODING: utf-8
```py
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
import torch
import torch.fx
import torch.nn as nn
from ...ops import MLP, StochasticDepth
from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"MViT",
"MViT_V1_B_Weights",
"mvit_v1_b",
"MViT_V2_S_Weights",
"mvit_v2_s",
]
@dataclass
class MSBlockConfig:
num_heads: int
input_channels: int
output_channels: int
kernel_q: List[int]
kernel_kv: List[int]
stride_q: List[int]
stride_kv: List[int]
def _prod(s: Sequence[int]) -> int:
product = 1
for v in s:
product *= v
return product
def _unsqueeze(x: torch.Tensor, target_dim: int, expand_dim: int) -> Tuple[torch.Tensor, int]:
tensor_dim = x.dim()
if tensor_dim == target_dim - 1:
x = x.unsqueeze(expand_dim)
elif tensor_dim != target_dim:
raise ValueError(f"Unsupported input dimension {x.shape}")
return x, tensor_dim
def _squeeze(x: torch.Tensor, target_dim: int, expand_dim: int, tensor_dim: int) -> torch.Tensor:
if tensor_dim == target_dim - 1:
x = x.squeeze(expand_dim)
return x
torch.fx.wrap("_unsqueeze")
torch.fx.wrap("_squeeze")
class Pool(nn.Module):
def __init__(
self,
pool: nn.Module,
norm: Optional[nn.Module],
activation: Optional[nn.Module] = None,
norm_before_pool: bool = False,
) -> None:
super().__init__()
self.pool = pool
layers = []
if norm is not None:
layers.append(norm)
if activation is not None:
layers.append(activation)
self.norm_act = nn.Sequential(*layers) if layers else None
self.norm_before_pool = norm_before_pool
def forward(self, x: torch.Tensor, thw: Tuple[int, int, int]) -> Tuple[torch.Tensor, Tuple[int, int, int]]:
x, tensor_dim = _unsqueeze(x, 4, 1)
# Separate the class token and reshape the input
class_token, x = torch.tensor_split(x, indices=(1,), dim=2)
x = x.transpose(2, 3)
B, N, C = x.shape[:3]
x = x.reshape((B * N, C) + thw).contiguous()
# normalizing prior to pooling is useful when we use BN, which can then be absorbed to speed up inference
if self.norm_before_pool and self.norm_act is not None:
x = self.norm_act(x)
# apply the pool on the input and add back the token
x = self.pool(x)
T, H, W = x.shape[2:]
x = x.reshape(B, N, C, -1).transpose(2, 3)
x = torch.cat((class_token, x), dim=2)
if not self.norm_before_pool and self.norm_act is not None:
x = self.norm_act(x)
x = _squeeze(x, 4, 1, tensor_dim)
return x, (T, H, W)
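# Added shape note (illustrative, assuming N == num_heads and C == head_dim):
#   input x:  (B, N, 1 + T*H*W, C)    -- class token followed by the flattened (T, H, W) grid
#   output x: (B, N, 1 + T'*H'*W', C) -- with the pooled grid (T', H', W') returned alongside it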
def _interpolate(embedding: torch.Tensor, d: int) -> torch.Tensor:
if embedding.shape[0] == d:
return embedding
return (
nn.functional.interpolate(
embedding.permute(1, 0).unsqueeze(0),
size=d,
mode="linear",
)
.squeeze(0)
.permute(1, 0)
)
def _add_rel_pos(
attn: torch.Tensor,
q: torch.Tensor,
q_thw: Tuple[int, int, int],
k_thw: Tuple[int, int, int],
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
rel_pos_t: torch.Tensor,
) -> torch.Tensor:
# Modified code from: https://github.com/facebookresearch/SlowFast/commit/1aebd71a2efad823d52b827a3deaf15a56cf4932
q_t, q_h, q_w = q_thw
k_t, k_h, k_w = k_thw
dh = int(2 * max(q_h, k_h) - 1)
dw = int(2 * max(q_w, k_w) - 1)
dt = int(2 * max(q_t, k_t) - 1)
# Scale up rel pos if shapes for q and k are different.
q_h_ratio = max(k_h / q_h, 1.0)
k_h_ratio = max(q_h / k_h, 1.0)
dist_h = torch.arange(q_h)[:, None] * q_h_ratio - (torch.arange(k_h)[None, :] + (1.0 - k_h)) * k_h_ratio
q_w_ratio = max(k_w / q_w, 1.0)
k_w_ratio = max(q_w / k_w, 1.0)
dist_w = torch.arange(q_w)[:, None] * q_w_ratio - (torch.arange(k_w)[None, :] + (1.0 - k_w)) * k_w_ratio
q_t_ratio = max(k_t / q_t, 1.0)
k_t_ratio = max(q_t / k_t, 1.0)
dist_t = torch.arange(q_t)[:, None] * q_t_ratio - (torch.arange(k_t)[None, :] + (1.0 - k_t)) * k_t_ratio
# Interpolate rel pos if needed.
rel_pos_h = _interpolate(rel_pos_h, dh)
rel_pos_w = _interpolate(rel_pos_w, dw)
rel_pos_t = _interpolate(rel_pos_t, dt)
Rh = rel_pos_h[dist_h.long()]
Rw = rel_pos_w[dist_w.long()]
Rt = rel_pos_t[dist_t.long()]
B, n_head, _, dim = q.shape
r_q = q[:, :, 1:].reshape(B, n_head, q_t, q_h, q_w, dim)
rel_h_q = torch.einsum("bythwc,hkc->bythwk", r_q, Rh) # [B, H, q_t, qh, qw, k_h]
rel_w_q = torch.einsum("bythwc,wkc->bythwk", r_q, Rw) # [B, H, q_t, qh, qw, k_w]
# [B, H, q_t, q_h, q_w, dim] -> [q_t, B, H, q_h, q_w, dim] -> [q_t, B*H*q_h*q_w, dim]
r_q = r_q.permute(2, 0, 1, 3, 4, 5).reshape(q_t, B * n_head * q_h * q_w, dim)
# [q_t, B*H*q_h*q_w, dim] * [q_t, dim, k_t] = [q_t, B*H*q_h*q_w, k_t] -> [B*H*q_h*q_w, q_t, k_t]
rel_q_t = torch.matmul(r_q, Rt.transpose(1, 2)).transpose(0, 1)
# [B*H*q_h*q_w, q_t, k_t] -> [B, H, q_t, q_h, q_w, k_t]
rel_q_t = rel_q_t.view(B, n_head, q_h, q_w, q_t, k_t).permute(0, 1, 4, 2, 3, 5)
# Combine rel pos.
rel_pos = (
rel_h_q[:, :, :, :, :, None, :, None]
+ rel_w_q[:, :, :, :, :, None, None, :]
+ rel_q_t[:, :, :, :, :, :, None, None]
).reshape(B, n_head, q_t * q_h * q_w, k_t * k_h * k_w)
# Add it to attention
attn[:, :, 1:, 1:] += rel_pos
return attn
def _add_shortcut(x: torch.Tensor, shortcut: torch.Tensor, residual_with_cls_embed: bool):
if residual_with_cls_embed:
x.add_(shortcut)
else:
x[:, :, 1:, :] += shortcut[:, :, 1:, :]
return x
torch.fx.wrap("_add_rel_pos")
torch.fx.wrap("_add_shortcut")
class MultiscaleAttention(nn.Module):
def __init__(
self,
input_size: List[int],
embed_dim: int,
output_dim: int,
num_heads: int,
kernel_q: List[int],
kernel_kv: List[int],
stride_q: List[int],
stride_kv: List[int],
residual_pool: bool,
residual_with_cls_embed: bool,
rel_pos_embed: bool,
dropout: float = 0.0,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
) -> None:
super().__init__()
self.embed_dim = embed_dim
self.output_dim = output_dim
self.num_heads = num_heads
self.head_dim = output_dim // num_heads
self.scaler = 1.0 / math.sqrt(self.head_dim)
self.residual_pool = residual_pool
self.residual_with_cls_embed = residual_with_cls_embed
self.qkv = nn.Linear(embed_dim, 3 * output_dim)
layers: List[nn.Module] = [nn.Linear(output_dim, output_dim)]
if dropout > 0.0:
layers.append(nn.Dropout(dropout, inplace=True))
self.project = nn.Sequential(*layers)
self.pool_q: Optional[nn.Module] = None
if _prod(kernel_q) > 1 or _prod(stride_q) > 1:
padding_q = [int(q // 2) for q in kernel_q]
self.pool_q = Pool(
nn.Conv3d(
self.head_dim,
self.head_dim,
kernel_q, # type: ignore[arg-type]
stride=stride_q, # type: ignore[arg-type]
padding=padding_q, # type: ignore[arg-type]
groups=self.head_dim,
bias=False,
),
norm_layer(self.head_dim),
)
self.pool_k: Optional[nn.Module] = None
self.pool_v: Optional[nn.Module] = None
if _prod(kernel_kv) > 1 or _prod(stride_kv) > 1:
padding_kv = [int(kv // 2) for kv in kernel_kv]
self.pool_k = Pool(
nn.Conv3d(
self.head_dim,
self.head_dim,
kernel_kv, # type: ignore[arg-type]
stride=stride_kv, # type: ignore[arg-type]
padding=padding_kv, # type: ignore[arg-type]
groups=self.head_dim,
bias=False,
),
norm_layer(self.head_dim),
)
self.pool_v = Pool(
nn.Conv3d(
self.head_dim,
self.head_dim,
kernel_kv, # type: ignore[arg-type]
stride=stride_kv, # type: ignore[arg-type]
padding=padding_kv, # type: ignore[arg-type]
groups=self.head_dim,
bias=False,
),
norm_layer(self.head_dim),
)
self.rel_pos_h: Optional[nn.Parameter] = None
self.rel_pos_w: Optional[nn.Parameter] = None
self.rel_pos_t: Optional[nn.Parameter] = None
if rel_pos_embed:
size = max(input_size[1:])
q_size = size // stride_q[1] if len(stride_q) > 0 else size
kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size
spatial_dim = 2 * max(q_size, kv_size) - 1
temporal_dim = 2 * input_size[0] - 1
self.rel_pos_h = nn.Parameter(torch.zeros(spatial_dim, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(spatial_dim, self.head_dim))
self.rel_pos_t = nn.Parameter(torch.zeros(temporal_dim, self.head_dim))
nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
nn.init.trunc_normal_(self.rel_pos_w, std=0.02)
nn.init.trunc_normal_(self.rel_pos_t, std=0.02)
def forward(self, x: torch.Tensor, thw: Tuple[int, int, int]) -> Tuple[torch.Tensor, Tuple[int, int, int]]:
B, N, C = x.shape
q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).transpose(1, 3).unbind(dim=2)
if self.pool_k is not None:
k, k_thw = self.pool_k(k, thw)
else:
k_thw = thw
if self.pool_v is not None:
v = self.pool_v(v, thw)[0]
if self.pool_q is not None:
q, thw = self.pool_q(q, thw)
attn = torch.matmul(self.scaler * q, k.transpose(2, 3))
if self.rel_pos_h is not None and self.rel_pos_w is not None and self.rel_pos_t is not None:
attn = _add_rel_pos(
attn,
q,
thw,
k_thw,
self.rel_pos_h,
self.rel_pos_w,
self.rel_pos_t,
)
attn = attn.softmax(dim=-1)
x = torch.matmul(attn, v)
if self.residual_pool:
_add_shortcut(x, q, self.residual_with_cls_embed)
x = x.transpose(1, 2).reshape(B, -1, self.output_dim)
x = self.project(x)
return x, thw
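# Added shape note (illustrative): with input x of shape (B, 1 + T*H*W, embed_dim), q/k/v are
# (B, num_heads, tokens, head_dim); pool_q shrinks the query token grid to thw', while pool_k and
# pool_v only shrink the attended keys/values, so the output is (B, 1 + T'*H'*W', output_dim).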
class MultiscaleBlock(nn.Module):
def __init__(
self,
input_size: List[int],
cnf: MSBlockConfig,
residual_pool: bool,
residual_with_cls_embed: bool,
rel_pos_embed: bool,
proj_after_attn: bool,
dropout: float = 0.0,
stochastic_depth_prob: float = 0.0,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
) -> None:
super().__init__()
self.proj_after_attn = proj_after_attn
self.pool_skip: Optional[nn.Module] = None
if _prod(cnf.stride_q) > 1:
kernel_skip = [s + 1 if s > 1 else s for s in cnf.stride_q]
padding_skip = [int(k // 2) for k in kernel_skip]
self.pool_skip = Pool(
nn.MaxPool3d(kernel_skip, stride=cnf.stride_q, padding=padding_skip), None # type: ignore[arg-type]
)
attn_dim = cnf.output_channels if proj_after_attn else cnf.input_channels
self.norm1 = norm_layer(cnf.input_channels)
self.norm2 = norm_layer(attn_dim)
self.needs_transposal = isinstance(self.norm1, nn.BatchNorm1d)
self.attn = MultiscaleAttention(
input_size,
cnf.input_channels,
attn_dim,
cnf.num_heads,
kernel_q=cnf.kernel_q,
kernel_kv=cnf.kernel_kv,
stride_q=cnf.stride_q,
stride_kv=cnf.stride_kv,
rel_pos_embed=rel_pos_embed,
residual_pool=residual_pool,
residual_with_cls_embed=residual_with_cls_embed,
dropout=dropout,
norm_layer=norm_layer,
)
self.mlp = MLP(
attn_dim,
[4 * attn_dim, cnf.output_channels],
activation_layer=nn.GELU,
dropout=dropout,
inplace=None,
)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
self.project: Optional[nn.Module] = None
if cnf.input_channels != cnf.output_channels:
self.project = nn.Linear(cnf.input_channels, cnf.output_channels)
def forward(self, x: torch.Tensor, thw: Tuple[int, int, int]) -> Tuple[torch.Tensor, Tuple[int, int, int]]:
x_norm1 = self.norm1(x.transpose(1, 2)).transpose(1, 2) if self.needs_transposal else self.norm1(x)
x_attn, thw_new = self.attn(x_norm1, thw)
x = x if self.project is None or not self.proj_after_attn else self.project(x_norm1)
x_skip = x if self.pool_skip is None else self.pool_skip(x, thw)[0]
x = x_skip + self.stochastic_depth(x_attn)
x_norm2 = self.norm2(x.transpose(1, 2)).transpose(1, 2) if self.needs_transposal else self.norm2(x)
x_proj = x if self.project is None or self.proj_after_attn else self.project(x_norm2)
return x_proj + self.stochastic_depth(self.mlp(x_norm2)), thw_new
class PositionalEncoding(nn.Module):
def __init__(self, embed_size: int, spatial_size: Tuple[int, int], temporal_size: int, rel_pos_embed: bool) -> None:
super().__init__()
self.spatial_size = spatial_size
self.temporal_size = temporal_size
self.class_token = nn.Parameter(torch.zeros(embed_size))
self.spatial_pos: Optional[nn.Parameter] = None
self.temporal_pos: Optional[nn.Parameter] = None
self.class_pos: Optional[nn.Parameter] = None
if not rel_pos_embed:
self.spatial_pos = nn.Parameter(torch.zeros(self.spatial_size[0] * self.spatial_size[1], embed_size))
self.temporal_pos = nn.Parameter(torch.zeros(self.temporal_size, embed_size))
self.class_pos = nn.Parameter(torch.zeros(embed_size))
def forward(self, x: torch.Tensor) -> torch.Tensor:
class_token = self.class_token.expand(x.size(0), -1).unsqueeze(1)
x = torch.cat((class_token, x), dim=1)
if self.spatial_pos is not None and self.temporal_pos is not None and self.class_pos is not None:
hw_size, embed_size = self.spatial_pos.shape
pos_embedding = torch.repeat_interleave(self.temporal_pos, hw_size, dim=0)
pos_embedding.add_(self.spatial_pos.unsqueeze(0).expand(self.temporal_size, -1, -1).reshape(-1, embed_size))
pos_embedding = torch.cat((self.class_pos.unsqueeze(0), pos_embedding), dim=0).unsqueeze(0)
x.add_(pos_embedding)
return x
class MViT(nn.Module):
def __init__(
self,
spatial_size: Tuple[int, int],
temporal_size: int,
block_setting: Sequence[MSBlockConfig],
residual_pool: bool,
residual_with_cls_embed: bool,
rel_pos_embed: bool,
proj_after_attn: bool,
dropout: float = 0.5,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.0,
num_classes: int = 400,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
patch_embed_kernel: Tuple[int, int, int] = (3, 7, 7),
patch_embed_stride: Tuple[int, int, int] = (2, 4, 4),
patch_embed_padding: Tuple[int, int, int] = (1, 3, 3),
) -> None:
"""
MViT main class.
Args:
spatial_size (tuple of ints): The spatial size of the input as ``(H, W)``.
temporal_size (int): The temporal size ``T`` of the input.
block_setting (sequence of MSBlockConfig): The Network structure.
residual_pool (bool): If True, use MViTv2 pooling residual connection.
residual_with_cls_embed (bool): If True, the addition on the residual connection will include
the class embedding.
rel_pos_embed (bool): If True, use MViTv2's relative positional embeddings.
proj_after_attn (bool): If True, apply the projection after the attention.
dropout (float): Dropout rate. Default: 0.5.
attention_dropout (float): Attention dropout rate. Default: 0.0.
stochastic_depth_prob (float): Stochastic depth rate. Default: 0.0.
num_classes (int): The number of classes.
block (callable, optional): Module specifying the layer which consists of the attention and mlp.
norm_layer (callable, optional): Module specifying the normalization layer to use.
patch_embed_kernel (tuple of ints): The kernel of the convolution that patchifies the input.
patch_embed_stride (tuple of ints): The stride of the convolution that patchifies the input.
patch_embed_padding (tuple of ints): The padding of the convolution that patchifies the input.
"""
super().__init__()
# This implementation employs a different parameterization scheme from the one used in PyTorch Video:
# https://github.com/facebookresearch/pytorchvideo/blob/718d0a4/pytorchvideo/models/vision_transformers.py
# We remove any experimental configuration that didn't make it into the final variants of the models. To represent
# the configuration of the architecture, we use the simplified form suggested in Table 1 of the paper.
_log_api_usage_once(self)
total_stage_blocks = len(block_setting)
if total_stage_blocks == 0:
raise ValueError("The configuration parameter can't be empty.")
if block is None:
block = MultiscaleBlock
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-6)
# Patch Embedding module
self.conv_proj = nn.Conv3d(
in_channels=3,
out_channels=block_setting[0].input_channels,
kernel_size=patch_embed_kernel,
stride=patch_embed_stride,
padding=patch_embed_padding,
)
input_size = [size // stride for size, stride in zip((temporal_size,) + spatial_size, self.conv_proj.stride)]
# Spatio-Temporal Class Positional Encoding
self.pos_encoding = PositionalEncoding(
embed_size=block_setting[0].input_channels,
spatial_size=(input_size[1], input_size[2]),
temporal_size=input_size[0],
rel_pos_embed=rel_pos_embed,
)
# Encoder module
self.blocks = nn.ModuleList()
for stage_block_id, cnf in enumerate(block_setting):
# adjust stochastic depth probability based on the depth of the stage block
sd_prob = stochastic_depth_prob * stage_block_id / (total_stage_blocks - 1.0)
self.blocks.append(
block(
input_size=input_size,
cnf=cnf,
residual_pool=residual_pool,
residual_with_cls_embed=residual_with_cls_embed,
rel_pos_embed=rel_pos_embed,
proj_after_attn=proj_after_attn,
dropout=attention_dropout,
stochastic_depth_prob=sd_prob,
norm_layer=norm_layer,
)
)
if len(cnf.stride_q) > 0:
input_size = [size // stride for size, stride in zip(input_size, cnf.stride_q)]
self.norm = norm_layer(block_setting[-1].output_channels)
# Classifier module
self.head = nn.Sequential(
nn.Dropout(dropout, inplace=True),
nn.Linear(block_setting[-1].output_channels, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.LayerNorm):
if m.weight is not None:
nn.init.constant_(m.weight, 1.0)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, PositionalEncoding):
for weights in m.parameters():
nn.init.trunc_normal_(weights, std=0.02)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Convert if necessary (B, C, H, W) -> (B, C, 1, H, W)
x = _unsqueeze(x, 5, 2)[0]
# patchify and reshape: (B, C, T, H, W) -> (B, embed_channels[0], T', H', W') -> (B, THW', embed_channels[0])
x = self.conv_proj(x)
x = x.flatten(2).transpose(1, 2)
# add positional encoding
x = self.pos_encoding(x)
# pass patches through the encoder
thw = (self.pos_encoding.temporal_size,) + self.pos_encoding.spatial_size
for block in self.blocks:
x, thw = block(x, thw)
x = self.norm(x)
# classifier "token" as used by standard language architectures
x = x[:, 0]
x = self.head(x)
return x
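# Added shape walk-through (illustrative): for a 16x224x224 input and the default (2, 4, 4)
# patchification stride,
#   (B, 3, 16, 224, 224) --conv_proj--> (B, C0, 8, 56, 56) --flatten/transpose--> (B, 8*56*56, C0)
#   pos_encoding prepends the class token -> (B, 1 + 8*56*56, C0)
#   the blocks progressively pool the (T, H, W) grid, and the class token (index 0) of the
#   final normalized sequence feeds the classification head -> (B, num_classes)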
def _mvit(
block_setting: List[MSBlockConfig],
stochastic_depth_prob: float,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> MViT:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
_ovewrite_named_param(kwargs, "spatial_size", weights.meta["min_size"])
_ovewrite_named_param(kwargs, "temporal_size", weights.meta["min_temporal_size"])
spatial_size = kwargs.pop("spatial_size", (224, 224))
temporal_size = kwargs.pop("temporal_size", 16)
model = MViT(
spatial_size=spatial_size,
temporal_size=temporal_size,
block_setting=block_setting,
residual_pool=kwargs.pop("residual_pool", False),
residual_with_cls_embed=kwargs.pop("residual_with_cls_embed", True),
rel_pos_embed=kwargs.pop("rel_pos_embed", False),
proj_after_attn=kwargs.pop("proj_after_attn", False),
stochastic_depth_prob=stochastic_depth_prob,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
class MViT_V1_B_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/mvit_v1_b-dbeb1030.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.45, 0.45, 0.45),
std=(0.225, 0.225, 0.225),
),
meta={
"min_size": (224, 224),
"min_temporal_size": 16,
"categories": _KINETICS400_CATEGORIES,
"recipe": "https://github.com/facebookresearch/pytorchvideo/blob/main/docs/source/model_zoo.md",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=7.5`, `clips_per_video=5`, and `clip_len=16`"
),
"num_params": 36610672,
"_metrics": {
"Kinetics-400": {
"acc@1": 78.477,
"acc@5": 93.582,
}
},
"_ops": 70.599,
"_file_size": 139.764,
},
)
DEFAULT = KINETICS400_V1
class MViT_V2_S_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/mvit_v2_s-ae3be167.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.45, 0.45, 0.45),
std=(0.225, 0.225, 0.225),
),
meta={
"min_size": (224, 224),
"min_temporal_size": 16,
"categories": _KINETICS400_CATEGORIES,
"recipe": "https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=7.5`, `clips_per_video=5`, and `clip_len=16`"
),
"num_params": 34537744,
"_metrics": {
"Kinetics-400": {
"acc@1": 80.757,
"acc@5": 94.665,
}
},
"_ops": 64.224,
"_file_size": 131.884,
},
)
DEFAULT = KINETICS400_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", MViT_V1_B_Weights.KINETICS400_V1))
def mvit_v1_b(*, weights: Optional[MViT_V1_B_Weights] = None, progress: bool = True, **kwargs: Any) -> MViT:
"""
Constructs a base MViTV1 architecture from
`Multiscale Vision Transformers <https://arxiv.org/abs/2104.11227>`__.
.. betastatus:: video module
Args:
weights (:class:`~torchvision.models.video.MViT_V1_B_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.MViT_V1_B_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.MViT``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/mvit.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.MViT_V1_B_Weights
:members:
"""
weights = MViT_V1_B_Weights.verify(weights)
config: Dict[str, List] = {
"num_heads": [1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8],
"input_channels": [96, 192, 192, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 768, 768],
"output_channels": [192, 192, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 768, 768, 768],
"kernel_q": [[], [3, 3, 3], [], [3, 3, 3], [], [], [], [], [], [], [], [], [], [], [3, 3, 3], []],
"kernel_kv": [
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
],
"stride_q": [[], [1, 2, 2], [], [1, 2, 2], [], [], [], [], [], [], [], [], [], [], [1, 2, 2], []],
"stride_kv": [
[1, 8, 8],
[1, 4, 4],
[1, 4, 4],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 1, 1],
[1, 1, 1],
],
}
block_setting = []
for i in range(len(config["num_heads"])):
block_setting.append(
MSBlockConfig(
num_heads=config["num_heads"][i],
input_channels=config["input_channels"][i],
output_channels=config["output_channels"][i],
kernel_q=config["kernel_q"][i],
kernel_kv=config["kernel_kv"][i],
stride_q=config["stride_q"][i],
stride_kv=config["stride_kv"][i],
)
)
return _mvit(
spatial_size=(224, 224),
temporal_size=16,
block_setting=block_setting,
residual_pool=False,
residual_with_cls_embed=False,
stochastic_depth_prob=kwargs.pop("stochastic_depth_prob", 0.2),
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", MViT_V2_S_Weights.KINETICS400_V1))
def mvit_v2_s(*, weights: Optional[MViT_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> MViT:
"""Constructs a small MViTV2 architecture from
`Multiscale Vision Transformers <https://arxiv.org/abs/2104.11227>`__ and
`MViTv2: Improved Multiscale Vision Transformers for Classification
and Detection <https://arxiv.org/abs/2112.01526>`__.
.. betastatus:: video module
Args:
weights (:class:`~torchvision.models.video.MViT_V2_S_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.MViT_V2_S_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.MViT``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/mvit.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.MViT_V2_S_Weights
:members:
"""
weights = MViT_V2_S_Weights.verify(weights)
config: Dict[str, List] = {
"num_heads": [1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8],
"input_channels": [96, 96, 192, 192, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 768],
"output_channels": [96, 192, 192, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 768, 768],
"kernel_q": [
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
],
"kernel_kv": [
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
],
"stride_q": [
[1, 1, 1],
[1, 2, 2],
[1, 1, 1],
[1, 2, 2],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 2, 2],
[1, 1, 1],
],
"stride_kv": [
[1, 8, 8],
[1, 4, 4],
[1, 4, 4],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 2, 2],
[1, 1, 1],
[1, 1, 1],
],
}
block_setting = []
for i in range(len(config["num_heads"])):
block_setting.append(
MSBlockConfig(
num_heads=config["num_heads"][i],
input_channels=config["input_channels"][i],
output_channels=config["output_channels"][i],
kernel_q=config["kernel_q"][i],
kernel_kv=config["kernel_kv"][i],
stride_q=config["stride_q"][i],
stride_kv=config["stride_kv"][i],
)
)
return _mvit(
spatial_size=(224, 224),
temporal_size=16,
block_setting=block_setting,
residual_pool=True,
residual_with_cls_embed=False,
rel_pos_embed=True,
proj_after_attn=True,
stochastic_depth_prob=kwargs.pop("stochastic_depth_prob", 0.2),
weights=weights,
progress=progress,
**kwargs,
)
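# --- Illustrative usage sketch (added; not part of the original torchvision file) ---
# Builds the small MViTv2 with random weights and runs a 16-frame 224x224 clip through it.
if __name__ == "__main__":
    model = mvit_v2_s(weights=None)
    model.eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 16, 224, 224))  # (B, C, T, H, W)
    print(out.shape)  # torch.Size([1, 400]) -- Kinetics-400 classes by default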
```
|
==========================================================================================================================
SOURCE CODE FILE: resnet.py
LINES: 1
SIZE: 16.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\video\resnet.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import torch.nn as nn
from torch import Tensor
from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"VideoResNet",
"R3D_18_Weights",
"MC3_18_Weights",
"R2Plus1D_18_Weights",
"r3d_18",
"mc3_18",
"r2plus1d_18",
]
class Conv3DSimple(nn.Conv3d):
def __init__(
self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
) -> None:
super().__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(3, 3, 3),
stride=stride,
padding=padding,
bias=False,
)
@staticmethod
def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
return stride, stride, stride
class Conv2Plus1D(nn.Sequential):
def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None:
super().__init__(
nn.Conv3d(
in_planes,
midplanes,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, padding, padding),
bias=False,
),
nn.BatchNorm3d(midplanes),
nn.ReLU(inplace=True),
nn.Conv3d(
midplanes, out_planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1), padding=(padding, 0, 0), bias=False
),
)
@staticmethod
def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
return stride, stride, stride
class Conv3DNoTemporal(nn.Conv3d):
def __init__(
self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
) -> None:
super().__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, padding, padding),
bias=False,
)
@staticmethod
def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
return 1, stride, stride
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes: int,
planes: int,
conv_builder: Callable[..., nn.Module],
stride: int = 1,
downsample: Optional[nn.Module] = None,
) -> None:
midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
super().__init__()
self.conv1 = nn.Sequential(
conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(conv_builder(planes, planes, midplanes), nn.BatchNorm3d(planes))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
residual = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
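# Added note (illustrative): ``midplanes`` is chosen so the factorized (2+1)D pair roughly
# matches the parameter count of a full 3x3x3 convolution (M_i in the R(2+1)D paper), e.g.
# inplanes = planes = 64 gives (64 * 64 * 27) // (64 * 9 + 3 * 64) = 110592 // 768 = 144.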
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes: int,
planes: int,
conv_builder: Callable[..., nn.Module],
stride: int = 1,
downsample: Optional[nn.Module] = None,
) -> None:
super().__init__()
midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
# 1x1x1
self.conv1 = nn.Sequential(
nn.Conv3d(inplanes, planes, kernel_size=1, bias=False), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
)
# Second kernel
self.conv2 = nn.Sequential(
conv_builder(planes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
)
# 1x1x1
self.conv3 = nn.Sequential(
nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
nn.BatchNorm3d(planes * self.expansion),
)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
residual = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicStem(nn.Sequential):
"""The default conv-batchnorm-relu stem"""
def __init__(self) -> None:
super().__init__(
nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True),
)
class R2Plus1dStem(nn.Sequential):
"""R(2+1)D stem is different than the default one as it uses separated 3D convolution"""
def __init__(self) -> None:
super().__init__(
nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False),
nn.BatchNorm3d(45),
nn.ReLU(inplace=True),
nn.Conv3d(45, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True),
)
class VideoResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
layers: List[int],
stem: Callable[..., nn.Module],
num_classes: int = 400,
zero_init_residual: bool = False,
) -> None:
"""Generic resnet video generator.
Args:
block (Type[Union[BasicBlock, Bottleneck]]): resnet building block
conv_makers (List[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]]): generator
function for each layer
layers (List[int]): number of blocks per layer
stem (Callable[..., nn.Module]): module specifying the ResNet stem.
num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
"""
super().__init__()
_log_api_usage_once(self)
self.inplanes = 64
self.stem = stem()
self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
# init weights
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[union-attr, arg-type]
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
# Flatten the layer to fc
x = x.flatten(1)
x = self.fc(x)
return x
def _make_layer(
self,
block: Type[Union[BasicBlock, Bottleneck]],
conv_builder: Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]],
planes: int,
blocks: int,
stride: int = 1,
) -> nn.Sequential:
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
ds_stride = conv_builder.get_downsample_stride(stride)
downsample = nn.Sequential(
nn.Conv3d(self.inplanes, planes * block.expansion, kernel_size=1, stride=ds_stride, bias=False),
nn.BatchNorm3d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, conv_builder))
return nn.Sequential(*layers)
def _video_resnet(
block: Type[Union[BasicBlock, Bottleneck]],
conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
layers: List[int],
stem: Callable[..., nn.Module],
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> VideoResNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = VideoResNet(block, conv_makers, layers, stem, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _KINETICS400_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification",
"_docs": (
"The weights reproduce closely the accuracy of the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=15`, `clips_per_video=5`, and `clip_len=16`."
),
}
class R3D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth",
transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"num_params": 33371472,
"_metrics": {
"Kinetics-400": {
"acc@1": 63.200,
"acc@5": 83.479,
}
},
"_ops": 40.697,
"_file_size": 127.359,
},
)
DEFAULT = KINETICS400_V1
class MC3_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth",
transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"num_params": 11695440,
"_metrics": {
"Kinetics-400": {
"acc@1": 63.960,
"acc@5": 84.130,
}
},
"_ops": 43.343,
"_file_size": 44.672,
},
)
DEFAULT = KINETICS400_V1
class R2Plus1D_18_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth",
transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
meta={
**_COMMON_META,
"num_params": 31505325,
"_metrics": {
"Kinetics-400": {
"acc@1": 67.463,
"acc@5": 86.175,
}
},
"_ops": 40.519,
"_file_size": 120.318,
},
)
DEFAULT = KINETICS400_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1))
def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
"""Construct 18 layer Resnet3D model.
.. betastatus:: video module
Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
Args:
weights (:class:`~torchvision.models.video.R3D_18_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.R3D_18_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.R3D_18_Weights
:members:
"""
weights = R3D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] * 4,
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1))
def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
"""Construct 18 layer Mixed Convolution network as in
.. betastatus:: video module
Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
Args:
weights (:class:`~torchvision.models.video.MC3_18_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.MC3_18_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.MC3_18_Weights
:members:
"""
weights = MC3_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv3DSimple] + [Conv3DNoTemporal] * 3, # type: ignore[list-item]
[2, 2, 2, 2],
BasicStem,
weights,
progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1))
def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
"""Construct 18 layer deep R(2+1)D network as in
.. betastatus:: video module
Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
Args:
weights (:class:`~torchvision.models.video.R2Plus1D_18_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.R2Plus1D_18_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.R2Plus1D_18_Weights
:members:
"""
weights = R2Plus1D_18_Weights.verify(weights)
return _video_resnet(
BasicBlock,
[Conv2Plus1D] * 4,
[2, 2, 2, 2],
R2Plus1dStem,
weights,
progress,
**kwargs,
)
# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
model_urls = _ModelURLs(
{
"r3d_18": R3D_18_Weights.KINETICS400_V1.url,
"mc3_18": MC3_18_Weights.KINETICS400_V1.url,
"r2plus1d_18": R2Plus1D_18_Weights.KINETICS400_V1.url,
}
)
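# --- Illustrative usage sketch (added; not part of the original torchvision file) ---
# Builds an 18-layer R3D with random weights and classifies one 16-frame 112x112 clip.
if __name__ == "__main__":
    import torch
    model = r3d_18(weights=None)  # or: r3d_18(weights=R3D_18_Weights.DEFAULT) for pretrained
    model.eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 16, 112, 112))  # (B, C, T, H, W)
    print(out.shape)  # torch.Size([1, 400])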
```
|
=======================================================================================================================
SOURCE CODE FILE: s3d.py
LINES: 1
SIZE: 7.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\video\s3d.py
ENCODING: utf-8
```py
from functools import partial
from typing import Any, Callable, Optional
import torch
from torch import nn
from torchvision.ops.misc import Conv3dNormActivation
from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"S3D",
"S3D_Weights",
"s3d",
]
class TemporalSeparableConv(nn.Sequential):
def __init__(
self,
in_planes: int,
out_planes: int,
kernel_size: int,
stride: int,
padding: int,
norm_layer: Callable[..., nn.Module],
):
super().__init__(
Conv3dNormActivation(
in_planes,
out_planes,
kernel_size=(1, kernel_size, kernel_size),
stride=(1, stride, stride),
padding=(0, padding, padding),
bias=False,
norm_layer=norm_layer,
),
Conv3dNormActivation(
out_planes,
out_planes,
kernel_size=(kernel_size, 1, 1),
stride=(stride, 1, 1),
padding=(padding, 0, 0),
bias=False,
norm_layer=norm_layer,
),
)
class SepInceptionBlock3D(nn.Module):
def __init__(
self,
in_planes: int,
b0_out: int,
b1_mid: int,
b1_out: int,
b2_mid: int,
b2_out: int,
b3_out: int,
norm_layer: Callable[..., nn.Module],
):
super().__init__()
self.branch0 = Conv3dNormActivation(in_planes, b0_out, kernel_size=1, stride=1, norm_layer=norm_layer)
self.branch1 = nn.Sequential(
Conv3dNormActivation(in_planes, b1_mid, kernel_size=1, stride=1, norm_layer=norm_layer),
TemporalSeparableConv(b1_mid, b1_out, kernel_size=3, stride=1, padding=1, norm_layer=norm_layer),
)
self.branch2 = nn.Sequential(
Conv3dNormActivation(in_planes, b2_mid, kernel_size=1, stride=1, norm_layer=norm_layer),
TemporalSeparableConv(b2_mid, b2_out, kernel_size=3, stride=1, padding=1, norm_layer=norm_layer),
)
self.branch3 = nn.Sequential(
nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
Conv3dNormActivation(in_planes, b3_out, kernel_size=1, stride=1, norm_layer=norm_layer),
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class S3D(nn.Module):
"""S3D main class.
Args:
num_classes (int): number of classes for the classification task.
dropout (float): dropout probability.
norm_layer (Optional[Callable]): Module specifying the normalization layer to use.
Inputs:
x (Tensor): batch of videos with dimensions (batch, channel, time, height, width)
"""
def __init__(
self,
num_classes: int = 400,
dropout: float = 0.2,
norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
if norm_layer is None:
norm_layer = partial(nn.BatchNorm3d, eps=0.001, momentum=0.001)
self.features = nn.Sequential(
TemporalSeparableConv(3, 64, 7, 2, 3, norm_layer),
nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
Conv3dNormActivation(
64,
64,
kernel_size=1,
stride=1,
norm_layer=norm_layer,
),
TemporalSeparableConv(64, 192, 3, 1, 1, norm_layer),
nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
SepInceptionBlock3D(192, 64, 96, 128, 16, 32, 32, norm_layer),
SepInceptionBlock3D(256, 128, 128, 192, 32, 96, 64, norm_layer),
nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
SepInceptionBlock3D(480, 192, 96, 208, 16, 48, 64, norm_layer),
SepInceptionBlock3D(512, 160, 112, 224, 24, 64, 64, norm_layer),
SepInceptionBlock3D(512, 128, 128, 256, 24, 64, 64, norm_layer),
SepInceptionBlock3D(512, 112, 144, 288, 32, 64, 64, norm_layer),
SepInceptionBlock3D(528, 256, 160, 320, 32, 128, 128, norm_layer),
nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 0, 0)),
SepInceptionBlock3D(832, 256, 160, 320, 32, 128, 128, norm_layer),
SepInceptionBlock3D(832, 384, 192, 384, 48, 128, 128, norm_layer),
)
self.avgpool = nn.AvgPool3d(kernel_size=(2, 7, 7), stride=1)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Conv3d(1024, num_classes, kernel_size=1, stride=1, bias=True),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = self.classifier(x)
x = torch.mean(x, dim=(2, 3, 4))
return x
class S3D_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/s3d-d76dad2f.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256, 256),
),
meta={
"min_size": (224, 224),
"min_temporal_size": 14,
"categories": _KINETICS400_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification#s3d",
"_docs": (
"The weights aim to approximate the accuracy of the paper. The accuracies are estimated on clip-level "
"with parameters `frame_rate=15`, `clips_per_video=1`, and `clip_len=128`."
),
"num_params": 8320048,
"_metrics": {
"Kinetics-400": {
"acc@1": 68.368,
"acc@5": 88.050,
}
},
"_ops": 17.979,
"_file_size": 31.972,
},
)
DEFAULT = KINETICS400_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", S3D_Weights.KINETICS400_V1))
def s3d(*, weights: Optional[S3D_Weights] = None, progress: bool = True, **kwargs: Any) -> S3D:
"""Construct Separable 3D CNN model.
Reference: `Rethinking Spatiotemporal Feature Learning <https://arxiv.org/abs/1712.04851>`__.
.. betastatus:: video module
Args:
weights (:class:`~torchvision.models.video.S3D_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.S3D_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.S3D`` base class.
Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/s3d.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.S3D_Weights
:members:
"""
weights = S3D_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = S3D(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
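# --- Illustrative usage sketch (added; not part of the original torchvision file) ---
# S3D reduces time by 8x and space by 32x before the (2, 7, 7) average pool, hence the
# ``min_size``/``min_temporal_size`` of 224x224 / 14 frames declared in the weights meta above.
if __name__ == "__main__":
    model = s3d(weights=None)
    model.eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 16, 224, 224))  # (B, C, T, H, W)
    print(out.shape)  # torch.Size([1, 400])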
```
|
====================================================================================================================================
SOURCE CODE FILE: swin_transformer.py
LINES: 1
SIZE: 27.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\video\swin_transformer.py
ENCODING: utf-8
```py
# Modified from 2d Swin Transformers in torchvision:
# https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py
from functools import partial
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..swin_transformer import PatchMerging, SwinTransformerBlock
__all__ = [
"SwinTransformer3d",
"Swin3D_T_Weights",
"Swin3D_S_Weights",
"Swin3D_B_Weights",
"swin3d_t",
"swin3d_s",
"swin3d_b",
]
def _get_window_and_shift_size(
shift_size: List[int], size_dhw: List[int], window_size: List[int]
) -> Tuple[List[int], List[int]]:
for i in range(3):
if size_dhw[i] <= window_size[i]:
# In this case, window_size adapts to the input size and there is no need to shift
window_size[i] = size_dhw[i]
shift_size[i] = 0
return window_size, shift_size
torch.fx.wrap("_get_window_and_shift_size")
def _get_relative_position_bias(
relative_position_bias_table: torch.Tensor, relative_position_index: torch.Tensor, window_size: List[int]
) -> Tensor:
window_vol = window_size[0] * window_size[1] * window_size[2]
# In 3d case we flatten the relative_position_bias
relative_position_bias = relative_position_bias_table[
relative_position_index[:window_vol, :window_vol].flatten() # type: ignore[index]
]
relative_position_bias = relative_position_bias.view(window_vol, window_vol, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(0)
return relative_position_bias
torch.fx.wrap("_get_relative_position_bias")
def _compute_pad_size_3d(size_dhw: Tuple[int, int, int], patch_size: Tuple[int, int, int]) -> Tuple[int, int, int]:
pad_size = [(patch_size[i] - size_dhw[i] % patch_size[i]) % patch_size[i] for i in range(3)]
return pad_size[0], pad_size[1], pad_size[2]
torch.fx.wrap("_compute_pad_size_3d")
def _compute_attention_mask_3d(
x: Tensor,
size_dhw: Tuple[int, int, int],
window_size: Tuple[int, int, int],
shift_size: Tuple[int, int, int],
) -> Tensor:
# generate attention mask
attn_mask = x.new_zeros(*size_dhw)
num_windows = (size_dhw[0] // window_size[0]) * (size_dhw[1] // window_size[1]) * (size_dhw[2] // window_size[2])
slices = [
(
(0, -window_size[i]),
(-window_size[i], -shift_size[i]),
(-shift_size[i], None),
)
for i in range(3)
]
count = 0
for d in slices[0]:
for h in slices[1]:
for w in slices[2]:
attn_mask[d[0] : d[1], h[0] : h[1], w[0] : w[1]] = count
count += 1
# Partition window on attn_mask
attn_mask = attn_mask.view(
size_dhw[0] // window_size[0],
window_size[0],
size_dhw[1] // window_size[1],
window_size[1],
size_dhw[2] // window_size[2],
window_size[2],
)
attn_mask = attn_mask.permute(0, 2, 4, 1, 3, 5).reshape(
num_windows, window_size[0] * window_size[1] * window_size[2]
)
attn_mask = attn_mask.unsqueeze(1) - attn_mask.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
torch.fx.wrap("_compute_attention_mask_3d")
def shifted_window_attention_3d(
input: Tensor,
qkv_weight: Tensor,
proj_weight: Tensor,
relative_position_bias: Tensor,
window_size: List[int],
num_heads: int,
shift_size: List[int],
attention_dropout: float = 0.0,
dropout: float = 0.0,
qkv_bias: Optional[Tensor] = None,
proj_bias: Optional[Tensor] = None,
training: bool = True,
) -> Tensor:
"""
Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both shifted and non-shifted windows.
Args:
input (Tensor[B, T, H, W, C]): The 5-dimensional input tensor.
qkv_weight (Tensor[in_dim, out_dim]): The weight tensor of query, key, value.
proj_weight (Tensor[out_dim, out_dim]): The weight tensor of projection.
relative_position_bias (Tensor): The learned relative position bias added to attention.
window_size (List[int]): 3-dimensional window size (T, H, W).
num_heads (int): Number of attention heads.
shift_size (List[int]): Shift size for shifted window attention (T, H, W).
attention_dropout (float): Dropout ratio of attention weight. Default: 0.0.
dropout (float): Dropout ratio of output. Default: 0.0.
qkv_bias (Tensor[out_dim], optional): The bias tensor of query, key, value. Default: None.
proj_bias (Tensor[out_dim], optional): The bias tensor of projection. Default: None.
training (bool, optional): Training flag used by the dropout parameters. Default: True.
Returns:
Tensor[B, T, H, W, C]: The output tensor after shifted window attention.
"""
b, t, h, w, c = input.shape
# pad feature maps to multiples of window size
pad_size = _compute_pad_size_3d((t, h, w), (window_size[0], window_size[1], window_size[2]))
x = F.pad(input, (0, 0, 0, pad_size[2], 0, pad_size[1], 0, pad_size[0]))
_, tp, hp, wp, _ = x.shape
padded_size = (tp, hp, wp)
# cyclic shift
if sum(shift_size) > 0:
x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
# partition windows
num_windows = (
(padded_size[0] // window_size[0]) * (padded_size[1] // window_size[1]) * (padded_size[2] // window_size[2])
)
x = x.view(
b,
padded_size[0] // window_size[0],
window_size[0],
padded_size[1] // window_size[1],
window_size[1],
padded_size[2] // window_size[2],
window_size[2],
c,
)
x = x.permute(0, 1, 3, 5, 2, 4, 6, 7).reshape(
b * num_windows, window_size[0] * window_size[1] * window_size[2], c
) # B*nW, Wd*Wh*Ww, C
# multi-head attention
qkv = F.linear(x, qkv_weight, qkv_bias)
qkv = qkv.reshape(x.size(0), x.size(1), 3, num_heads, c // num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * (c // num_heads) ** -0.5
attn = q.matmul(k.transpose(-2, -1))
# add relative position bias
attn = attn + relative_position_bias
if sum(shift_size) > 0:
# generate attention mask to handle shifted windows with varying size
attn_mask = _compute_attention_mask_3d(
x,
(padded_size[0], padded_size[1], padded_size[2]),
(window_size[0], window_size[1], window_size[2]),
(shift_size[0], shift_size[1], shift_size[2]),
)
attn = attn.view(x.size(0) // num_windows, num_windows, num_heads, x.size(1), x.size(1))
attn = attn + attn_mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, num_heads, x.size(1), x.size(1))
attn = F.softmax(attn, dim=-1)
attn = F.dropout(attn, p=attention_dropout, training=training)
x = attn.matmul(v).transpose(1, 2).reshape(x.size(0), x.size(1), c)
x = F.linear(x, proj_weight, proj_bias)
x = F.dropout(x, p=dropout, training=training)
# reverse windows
x = x.view(
b,
padded_size[0] // window_size[0],
padded_size[1] // window_size[1],
padded_size[2] // window_size[2],
window_size[0],
window_size[1],
window_size[2],
c,
)
x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).reshape(b, tp, hp, wp, c)
# reverse cyclic shift
if sum(shift_size) > 0:
x = torch.roll(x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
# unpad features
x = x[:, :t, :h, :w, :].contiguous()
return x
torch.fx.wrap("shifted_window_attention_3d")
class ShiftedWindowAttention3d(nn.Module):
"""
See :func:`shifted_window_attention_3d`.
"""
def __init__(
self,
dim: int,
window_size: List[int],
shift_size: List[int],
num_heads: int,
qkv_bias: bool = True,
proj_bias: bool = True,
attention_dropout: float = 0.0,
dropout: float = 0.0,
) -> None:
super().__init__()
if len(window_size) != 3 or len(shift_size) != 3:
raise ValueError("window_size and shift_size must be of length 2")
self.window_size = window_size # Wd, Wh, Ww
self.shift_size = shift_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
self.dropout = dropout
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim, bias=proj_bias)
self.define_relative_position_bias_table()
self.define_relative_position_index()
def define_relative_position_bias_table(self) -> None:
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros(
(2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1),
self.num_heads,
)
) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)
def define_relative_position_index(self) -> None:
# get pair-wise relative position index for each token inside the window
coords_dhw = [torch.arange(self.window_size[i]) for i in range(3)]
coords = torch.stack(
torch.meshgrid(coords_dhw[0], coords_dhw[1], coords_dhw[2], indexing="ij")
) # 3, Wd, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1
# Unlike the 2d case, we don't flatten the relative_position_index here; it is flattened later in _get_relative_position_bias.
relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
def get_relative_position_bias(self, window_size: List[int]) -> torch.Tensor:
return _get_relative_position_bias(self.relative_position_bias_table, self.relative_position_index, window_size) # type: ignore
def forward(self, x: Tensor) -> Tensor:
_, t, h, w, _ = x.shape
size_dhw = [t, h, w]
window_size, shift_size = self.window_size.copy(), self.shift_size.copy()
# Handle case where window_size is larger than the input tensor
window_size, shift_size = _get_window_and_shift_size(shift_size, size_dhw, window_size)
relative_position_bias = self.get_relative_position_bias(window_size)
return shifted_window_attention_3d(
x,
self.qkv.weight,
self.proj.weight,
relative_position_bias,
window_size,
self.num_heads,
shift_size=shift_size,
attention_dropout=self.attention_dropout,
dropout=self.dropout,
qkv_bias=self.qkv.bias,
proj_bias=self.proj.bias,
training=self.training,
)
# Modified from:
# https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/mmaction/models/backbones/swin_transformer.py
class PatchEmbed3d(nn.Module):
"""Video to Patch Embedding.
Args:
patch_size (List[int]): Patch token size.
in_channels (int): Number of input channels. Default: 3
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(
self,
patch_size: List[int],
in_channels: int = 3,
embed_dim: int = 96,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.tuple_patch_size = (patch_size[0], patch_size[1], patch_size[2])
self.proj = nn.Conv3d(
in_channels,
embed_dim,
kernel_size=self.tuple_patch_size,
stride=self.tuple_patch_size,
)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = nn.Identity()
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
# padding
_, _, t, h, w = x.size()
pad_size = _compute_pad_size_3d((t, h, w), self.tuple_patch_size)
x = F.pad(x, (0, pad_size[2], 0, pad_size[1], 0, pad_size[0]))
x = self.proj(x) # B C T Wh Ww
x = x.permute(0, 2, 3, 4, 1) # B T Wh Ww C
if self.norm is not None:
x = self.norm(x)
return x
class SwinTransformer3d(nn.Module):
"""
Implements 3D Swin Transformer from the `"Video Swin Transformer" <https://arxiv.org/abs/2106.13230>`_ paper.
Args:
patch_size (List[int]): Patch size.
embed_dim (int): Patch embedding dimension.
depths (List[int]): Depth of each Swin Transformer layer.
num_heads (List[int]): Number of attention heads in different layers.
window_size (List[int]): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0.
dropout (float): Dropout rate. Default: 0.0.
attention_dropout (float): Attention dropout rate. Default: 0.0.
stochastic_depth_prob (float): Stochastic depth rate. Default: 0.1.
num_classes (int): Number of classes for classification head. Default: 400.
norm_layer (nn.Module, optional): Normalization layer. Default: None.
block (nn.Module, optional): SwinTransformer Block. Default: None.
downsample_layer (nn.Module): Downsample layer (patch merging). Default: PatchMerging.
patch_embed (nn.Module, optional): Patch Embedding layer. Default: None.
"""
def __init__(
self,
patch_size: List[int],
embed_dim: int,
depths: List[int],
num_heads: List[int],
window_size: List[int],
mlp_ratio: float = 4.0,
dropout: float = 0.0,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.1,
num_classes: int = 400,
norm_layer: Optional[Callable[..., nn.Module]] = None,
block: Optional[Callable[..., nn.Module]] = None,
downsample_layer: Callable[..., nn.Module] = PatchMerging,
patch_embed: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.num_classes = num_classes
if block is None:
block = partial(SwinTransformerBlock, attn_layer=ShiftedWindowAttention3d)
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-5)
if patch_embed is None:
patch_embed = PatchEmbed3d
# split image into non-overlapping patches
self.patch_embed = patch_embed(patch_size=patch_size, embed_dim=embed_dim, norm_layer=norm_layer)
self.pos_drop = nn.Dropout(p=dropout)
layers: List[nn.Module] = []
total_stage_blocks = sum(depths)
stage_block_id = 0
# build SwinTransformer blocks
for i_stage in range(len(depths)):
stage: List[nn.Module] = []
dim = embed_dim * 2**i_stage
for i_layer in range(depths[i_stage]):
# adjust stochastic depth probability based on the depth of the stage block
sd_prob = stochastic_depth_prob * float(stage_block_id) / (total_stage_blocks - 1)
stage.append(
block(
dim,
num_heads[i_stage],
window_size=window_size,
shift_size=[0 if i_layer % 2 == 0 else w // 2 for w in window_size],
mlp_ratio=mlp_ratio,
dropout=dropout,
attention_dropout=attention_dropout,
stochastic_depth_prob=sd_prob,
norm_layer=norm_layer,
attn_layer=ShiftedWindowAttention3d,
)
)
stage_block_id += 1
layers.append(nn.Sequential(*stage))
# add patch merging layer
if i_stage < (len(depths) - 1):
layers.append(downsample_layer(dim, norm_layer))
self.features = nn.Sequential(*layers)
self.num_features = embed_dim * 2 ** (len(depths) - 1)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool3d(1)
self.head = nn.Linear(self.num_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, x: Tensor) -> Tensor:
# x: B C T H W
x = self.patch_embed(x) # B _T _H _W C
x = self.pos_drop(x)
x = self.features(x) # B _T _H _W C
x = self.norm(x)
x = x.permute(0, 4, 1, 2, 3) # B, C, _T, _H, _W
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.head(x)
return x
def _swin_transformer3d(
patch_size: List[int],
embed_dim: int,
depths: List[int],
num_heads: List[int],
window_size: List[int],
stochastic_depth_prob: float,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> SwinTransformer3d:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = SwinTransformer3d(
patch_size=patch_size,
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
stochastic_depth_prob=stochastic_depth_prob,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"categories": _KINETICS400_CATEGORIES,
"min_size": (1, 1),
"min_temporal_size": 1,
}
class Swin3D_T_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/swin3d_t-7615ae03.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.4850, 0.4560, 0.4060),
std=(0.2290, 0.2240, 0.2250),
),
meta={
**_COMMON_META,
"recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`"
),
"num_params": 28158070,
"_metrics": {
"Kinetics-400": {
"acc@1": 77.715,
"acc@5": 93.519,
}
},
"_ops": 43.882,
"_file_size": 121.543,
},
)
DEFAULT = KINETICS400_V1
class Swin3D_S_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/swin3d_s-da41c237.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.4850, 0.4560, 0.4060),
std=(0.2290, 0.2240, 0.2250),
),
meta={
**_COMMON_META,
"recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`"
),
"num_params": 49816678,
"_metrics": {
"Kinetics-400": {
"acc@1": 79.521,
"acc@5": 94.158,
}
},
"_ops": 82.841,
"_file_size": 218.288,
},
)
DEFAULT = KINETICS400_V1
class Swin3D_B_Weights(WeightsEnum):
KINETICS400_V1 = Weights(
url="https://download.pytorch.org/models/swin3d_b_1k-24f7c7c6.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.4850, 0.4560, 0.4060),
std=(0.2290, 0.2240, 0.2250),
),
meta={
**_COMMON_META,
"recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`"
),
"num_params": 88048984,
"_metrics": {
"Kinetics-400": {
"acc@1": 79.427,
"acc@5": 94.386,
}
},
"_ops": 140.667,
"_file_size": 364.134,
},
)
KINETICS400_IMAGENET22K_V1 = Weights(
url="https://download.pytorch.org/models/swin3d_b_22k-7c6ae6fa.pth",
transforms=partial(
VideoClassification,
crop_size=(224, 224),
resize_size=(256,),
mean=(0.4850, 0.4560, 0.4060),
std=(0.2290, 0.2240, 0.2250),
),
meta={
**_COMMON_META,
"recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400",
"_docs": (
"The weights were ported from the paper. The accuracies are estimated on video-level "
"with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`"
),
"num_params": 88048984,
"_metrics": {
"Kinetics-400": {
"acc@1": 81.643,
"acc@5": 95.574,
}
},
"_ops": 140.667,
"_file_size": 364.134,
},
)
DEFAULT = KINETICS400_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin3D_T_Weights.KINETICS400_V1))
def swin3d_t(*, weights: Optional[Swin3D_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d:
"""
Constructs a swin_tiny architecture from
`Video Swin Transformer <https://arxiv.org/abs/2106.13230>`_.
Args:
weights (:class:`~torchvision.models.video.Swin3D_T_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.Swin3D_T_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.Swin3D_T_Weights
:members:
"""
weights = Swin3D_T_Weights.verify(weights)
return _swin_transformer3d(
patch_size=[2, 4, 4],
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 7, 7],
stochastic_depth_prob=0.1,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin3D_S_Weights.KINETICS400_V1))
def swin3d_s(*, weights: Optional[Swin3D_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d:
"""
Constructs a swin_small architecture from
`Video Swin Transformer <https://arxiv.org/abs/2106.13230>`_.
Args:
weights (:class:`~torchvision.models.video.Swin3D_S_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.Swin3D_S_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.Swin3D_S_Weights
:members:
"""
weights = Swin3D_S_Weights.verify(weights)
return _swin_transformer3d(
patch_size=[2, 4, 4],
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 7, 7],
stochastic_depth_prob=0.1,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin3D_B_Weights.KINETICS400_V1))
def swin3d_b(*, weights: Optional[Swin3D_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d:
"""
Constructs a swin_base architecture from
`Video Swin Transformer <https://arxiv.org/abs/2106.13230>`_.
Args:
weights (:class:`~torchvision.models.video.Swin3D_B_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.video.Swin3D_B_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/video/swin_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.video.Swin3D_B_Weights
:members:
"""
weights = Swin3D_B_Weights.verify(weights)
return _swin_transformer3d(
patch_size=[2, 4, 4],
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=[8, 7, 7],
stochastic_depth_prob=0.1,
weights=weights,
progress=progress,
**kwargs,
)
```
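A hedged usage sketch for the video Swin builders defined above (assuming a standard torchvision install; nothing is downloaded when ``weights=None``):

```py
# Minimal sketch: build swin3d_t without pretrained weights and run a dummy clip through it.
import torch
from torchvision.models.video import swin3d_t

model = swin3d_t(weights=None)  # use Swin3D_T_Weights.KINETICS400_V1 for Kinetics-400 weights
model.eval()
clip = torch.randn(1, 3, 16, 224, 224)  # (B, C, T, H, W); patch_size [2, 4, 4] gives an 8x56x56 token grid
with torch.no_grad():
    logits = model(clip)
print(logits.shape)  # expected: torch.Size([1, 400])
```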
|
================================================================================================================================
SOURCE CODE FILE: vision_transformer.py
LINES: 1
SIZE: 32.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\models\vision_transformer.py
ENCODING: utf-8
```py
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from ..ops.misc import Conv2dNormActivation, MLP
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"VisionTransformer",
"ViT_B_16_Weights",
"ViT_B_32_Weights",
"ViT_L_16_Weights",
"ViT_L_32_Weights",
"ViT_H_14_Weights",
"vit_b_16",
"vit_b_32",
"vit_l_16",
"vit_l_32",
"vit_h_14",
]
class ConvStemConfig(NamedTuple):
out_channels: int
kernel_size: int
stride: int
norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d
activation_layer: Callable[..., nn.Module] = nn.ReLU
class MLPBlock(MLP):
"""Transformer MLP block."""
_version = 2
def __init__(self, in_dim: int, mlp_dim: int, dropout: float):
super().__init__(in_dim, [mlp_dim, in_dim], activation_layer=nn.GELU, inplace=None, dropout=dropout)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.normal_(m.bias, std=1e-6)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Replacing legacy MLPBlock with MLP. See https://github.com/pytorch/vision/pull/6053
for i in range(2):
for type in ["weight", "bias"]:
old_key = f"{prefix}linear_{i+1}.{type}"
new_key = f"{prefix}{3*i}.{type}"
if old_key in state_dict:
state_dict[new_key] = state_dict.pop(old_key)
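# Example of this remapping: "linear_1.weight" -> "0.weight" and "linear_2.weight" -> "3.weight",
# matching the positions of the two Linear layers inside the MLP Sequential
# (Linear, GELU, Dropout, Linear, Dropout).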
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
class EncoderBlock(nn.Module):
"""Transformer encoder block."""
def __init__(
self,
num_heads: int,
hidden_dim: int,
mlp_dim: int,
dropout: float,
attention_dropout: float,
norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
):
super().__init__()
self.num_heads = num_heads
# Attention block
self.ln_1 = norm_layer(hidden_dim)
self.self_attention = nn.MultiheadAttention(hidden_dim, num_heads, dropout=attention_dropout, batch_first=True)
self.dropout = nn.Dropout(dropout)
# MLP block
self.ln_2 = norm_layer(hidden_dim)
self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout)
def forward(self, input: torch.Tensor):
torch._assert(input.dim() == 3, f"Expected (batch_size, seq_length, hidden_dim) got {input.shape}")
x = self.ln_1(input)
x, _ = self.self_attention(x, x, x, need_weights=False)
x = self.dropout(x)
x = x + input
y = self.ln_2(x)
y = self.mlp(y)
return x + y
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation."""
def __init__(
self,
seq_length: int,
num_layers: int,
num_heads: int,
hidden_dim: int,
mlp_dim: int,
dropout: float,
attention_dropout: float,
norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
):
super().__init__()
# Note that batch_size is on the first dim because
# we pass batch_first=True to nn.MultiheadAttention() in EncoderBlock
self.pos_embedding = nn.Parameter(torch.empty(1, seq_length, hidden_dim).normal_(std=0.02)) # from BERT
self.dropout = nn.Dropout(dropout)
layers: OrderedDict[str, nn.Module] = OrderedDict()
for i in range(num_layers):
layers[f"encoder_layer_{i}"] = EncoderBlock(
num_heads,
hidden_dim,
mlp_dim,
dropout,
attention_dropout,
norm_layer,
)
self.layers = nn.Sequential(layers)
self.ln = norm_layer(hidden_dim)
def forward(self, input: torch.Tensor):
torch._assert(input.dim() == 3, f"Expected (batch_size, seq_length, hidden_dim) got {input.shape}")
input = input + self.pos_embedding
return self.ln(self.layers(self.dropout(input)))
class VisionTransformer(nn.Module):
"""Vision Transformer as per https://arxiv.org/abs/2010.11929."""
def __init__(
self,
image_size: int,
patch_size: int,
num_layers: int,
num_heads: int,
hidden_dim: int,
mlp_dim: int,
dropout: float = 0.0,
attention_dropout: float = 0.0,
num_classes: int = 1000,
representation_size: Optional[int] = None,
norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
conv_stem_configs: Optional[List[ConvStemConfig]] = None,
):
super().__init__()
_log_api_usage_once(self)
torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
self.image_size = image_size
self.patch_size = patch_size
self.hidden_dim = hidden_dim
self.mlp_dim = mlp_dim
self.attention_dropout = attention_dropout
self.dropout = dropout
self.num_classes = num_classes
self.representation_size = representation_size
self.norm_layer = norm_layer
if conv_stem_configs is not None:
# As per https://arxiv.org/abs/2106.14881
seq_proj = nn.Sequential()
prev_channels = 3
for i, conv_stem_layer_config in enumerate(conv_stem_configs):
seq_proj.add_module(
f"conv_bn_relu_{i}",
Conv2dNormActivation(
in_channels=prev_channels,
out_channels=conv_stem_layer_config.out_channels,
kernel_size=conv_stem_layer_config.kernel_size,
stride=conv_stem_layer_config.stride,
norm_layer=conv_stem_layer_config.norm_layer,
activation_layer=conv_stem_layer_config.activation_layer,
),
)
prev_channels = conv_stem_layer_config.out_channels
seq_proj.add_module(
"conv_last", nn.Conv2d(in_channels=prev_channels, out_channels=hidden_dim, kernel_size=1)
)
self.conv_proj: nn.Module = seq_proj
else:
self.conv_proj = nn.Conv2d(
in_channels=3, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
)
seq_length = (image_size // patch_size) ** 2
# Add a class token
self.class_token = nn.Parameter(torch.zeros(1, 1, hidden_dim))
seq_length += 1
self.encoder = Encoder(
seq_length,
num_layers,
num_heads,
hidden_dim,
mlp_dim,
dropout,
attention_dropout,
norm_layer,
)
self.seq_length = seq_length
heads_layers: OrderedDict[str, nn.Module] = OrderedDict()
if representation_size is None:
heads_layers["head"] = nn.Linear(hidden_dim, num_classes)
else:
heads_layers["pre_logits"] = nn.Linear(hidden_dim, representation_size)
heads_layers["act"] = nn.Tanh()
heads_layers["head"] = nn.Linear(representation_size, num_classes)
self.heads = nn.Sequential(heads_layers)
if isinstance(self.conv_proj, nn.Conv2d):
# Init the patchify stem
fan_in = self.conv_proj.in_channels * self.conv_proj.kernel_size[0] * self.conv_proj.kernel_size[1]
nn.init.trunc_normal_(self.conv_proj.weight, std=math.sqrt(1 / fan_in))
if self.conv_proj.bias is not None:
nn.init.zeros_(self.conv_proj.bias)
elif self.conv_proj.conv_last is not None and isinstance(self.conv_proj.conv_last, nn.Conv2d):
# Init the last 1x1 conv of the conv stem
nn.init.normal_(
self.conv_proj.conv_last.weight, mean=0.0, std=math.sqrt(2.0 / self.conv_proj.conv_last.out_channels)
)
if self.conv_proj.conv_last.bias is not None:
nn.init.zeros_(self.conv_proj.conv_last.bias)
if hasattr(self.heads, "pre_logits") and isinstance(self.heads.pre_logits, nn.Linear):
fan_in = self.heads.pre_logits.in_features
nn.init.trunc_normal_(self.heads.pre_logits.weight, std=math.sqrt(1 / fan_in))
nn.init.zeros_(self.heads.pre_logits.bias)
if isinstance(self.heads.head, nn.Linear):
nn.init.zeros_(self.heads.head.weight)
nn.init.zeros_(self.heads.head.bias)
def _process_input(self, x: torch.Tensor) -> torch.Tensor:
n, c, h, w = x.shape
p = self.patch_size
torch._assert(h == self.image_size, f"Wrong image height! Expected {self.image_size} but got {h}!")
torch._assert(w == self.image_size, f"Wrong image width! Expected {self.image_size} but got {w}!")
n_h = h // p
n_w = w // p
# (n, c, h, w) -> (n, hidden_dim, n_h, n_w)
x = self.conv_proj(x)
# (n, hidden_dim, n_h, n_w) -> (n, hidden_dim, (n_h * n_w))
x = x.reshape(n, self.hidden_dim, n_h * n_w)
# (n, hidden_dim, (n_h * n_w)) -> (n, (n_h * n_w), hidden_dim)
# The self attention layer expects inputs in the format (N, S, E)
# where S is the source sequence length, N is the batch size, E is the
# embedding dimension
x = x.permute(0, 2, 1)
return x
def forward(self, x: torch.Tensor):
# Reshape and permute the input tensor
x = self._process_input(x)
n = x.shape[0]
# Expand the class token to the full batch
batch_class_token = self.class_token.expand(n, -1, -1)
x = torch.cat([batch_class_token, x], dim=1)
x = self.encoder(x)
# Classifier "token" as used by standard language architectures
x = x[:, 0]
x = self.heads(x)
return x
def _vision_transformer(
patch_size: int,
num_layers: int,
num_heads: int,
hidden_dim: int,
mlp_dim: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> VisionTransformer:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
_ovewrite_named_param(kwargs, "image_size", weights.meta["min_size"][0])
image_size = kwargs.pop("image_size", 224)
model = VisionTransformer(
image_size=image_size,
patch_size=patch_size,
num_layers=num_layers,
num_heads=num_heads,
hidden_dim=hidden_dim,
mlp_dim=mlp_dim,
**kwargs,
)
if weights:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META: Dict[str, Any] = {
"categories": _IMAGENET_CATEGORIES,
}
_COMMON_SWAG_META = {
**_COMMON_META,
"recipe": "https://github.com/facebookresearch/SWAG",
"license": "https://github.com/facebookresearch/SWAG/blob/main/LICENSE",
}
class ViT_B_16_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vit_b_16-c867db91.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 86567656,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_16",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.072,
"acc@5": 95.318,
}
},
"_ops": 17.564,
"_file_size": 330.285,
"_docs": """
These weights were trained from scratch by using a modified version of `DeIT
<https://arxiv.org/abs/2012.12877>`_'s training recipe.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/vit_b_16_swag-9ac1b537.pth",
transforms=partial(
ImageClassification,
crop_size=384,
resize_size=384,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"num_params": 86859496,
"min_size": (384, 384),
"_metrics": {
"ImageNet-1K": {
"acc@1": 85.304,
"acc@5": 97.650,
}
},
"_ops": 55.484,
"_file_size": 331.398,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/vit_b_16_lc_swag-4e70ced5.pth",
transforms=partial(
ImageClassification,
crop_size=224,
resize_size=224,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 86567656,
"min_size": (224, 224),
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.886,
"acc@5": 96.180,
}
},
"_ops": 17.564,
"_file_size": 330.285,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ViT_B_32_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vit_b_32-d86f8d99.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 88224232,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_32",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.912,
"acc@5": 92.466,
}
},
"_ops": 4.409,
"_file_size": 336.604,
"_docs": """
These weights were trained from scratch by using a modified version of `DeIT
<https://arxiv.org/abs/2012.12877>`_'s training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ViT_L_16_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vit_l_16-852ce7e3.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=242),
meta={
**_COMMON_META,
"num_params": 304326632,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_16",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.662,
"acc@5": 94.638,
}
},
"_ops": 61.555,
"_file_size": 1161.023,
"_docs": """
These weights were trained from scratch by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/vit_l_16_swag-4f3808c9.pth",
transforms=partial(
ImageClassification,
crop_size=512,
resize_size=512,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"num_params": 305174504,
"min_size": (512, 512),
"_metrics": {
"ImageNet-1K": {
"acc@1": 88.064,
"acc@5": 98.512,
}
},
"_ops": 361.986,
"_file_size": 1164.258,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/vit_l_16_lc_swag-4d563306.pth",
transforms=partial(
ImageClassification,
crop_size=224,
resize_size=224,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 304326632,
"min_size": (224, 224),
"_metrics": {
"ImageNet-1K": {
"acc@1": 85.146,
"acc@5": 97.422,
}
},
"_ops": 61.555,
"_file_size": 1161.023,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ViT_L_32_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/vit_l_32-c7638314.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 306535400,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_32",
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.972,
"acc@5": 93.07,
}
},
"_ops": 15.378,
"_file_size": 1169.449,
"_docs": """
These weights were trained from scratch by using a modified version of `DeIT
<https://arxiv.org/abs/2012.12877>`_'s training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ViT_H_14_Weights(WeightsEnum):
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/vit_h_14_swag-80465313.pth",
transforms=partial(
ImageClassification,
crop_size=518,
resize_size=518,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"num_params": 633470440,
"min_size": (518, 518),
"_metrics": {
"ImageNet-1K": {
"acc@1": 88.552,
"acc@5": 98.694,
}
},
"_ops": 1016.717,
"_file_size": 2416.643,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/vit_h_14_lc_swag-c1eb923e.pth",
transforms=partial(
ImageClassification,
crop_size=224,
resize_size=224,
interpolation=InterpolationMode.BICUBIC,
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 632045800,
"min_size": (224, 224),
"_metrics": {
"ImageNet-1K": {
"acc@1": 85.708,
"acc@5": 97.730,
}
},
"_ops": 167.295,
"_file_size": 2411.209,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_SWAG_E2E_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_B_16_Weights.IMAGENET1K_V1))
def vit_b_16(*, weights: Optional[ViT_B_16_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_b_16 architecture from
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
weights (:class:`~torchvision.models.ViT_B_16_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.ViT_B_16_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ViT_B_16_Weights
:members:
"""
weights = ViT_B_16_Weights.verify(weights)
return _vision_transformer(
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_B_32_Weights.IMAGENET1K_V1))
def vit_b_32(*, weights: Optional[ViT_B_32_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_b_32 architecture from
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
weights (:class:`~torchvision.models.ViT_B_32_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.ViT_B_32_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ViT_B_32_Weights
:members:
"""
weights = ViT_B_32_Weights.verify(weights)
return _vision_transformer(
patch_size=32,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_L_16_Weights.IMAGENET1K_V1))
def vit_l_16(*, weights: Optional[ViT_L_16_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_l_16 architecture from
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
weights (:class:`~torchvision.models.ViT_L_16_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.ViT_L_16_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ViT_L_16_Weights
:members:
"""
weights = ViT_L_16_Weights.verify(weights)
return _vision_transformer(
patch_size=16,
num_layers=24,
num_heads=16,
hidden_dim=1024,
mlp_dim=4096,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_L_32_Weights.IMAGENET1K_V1))
def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_l_32 architecture from
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
weights (:class:`~torchvision.models.ViT_L_32_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.ViT_L_32_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ViT_L_32_Weights
:members:
"""
weights = ViT_L_32_Weights.verify(weights)
return _vision_transformer(
patch_size=32,
num_layers=24,
num_heads=16,
hidden_dim=1024,
mlp_dim=4096,
weights=weights,
progress=progress,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_h_14 architecture from
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
weights (:class:`~torchvision.models.ViT_H_14_Weights`, optional): The pretrained
weights to use. See :class:`~torchvision.models.ViT_H_14_Weights`
below for more details and possible values. By default, no pre-trained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ViT_H_14_Weights
:members:
"""
weights = ViT_H_14_Weights.verify(weights)
return _vision_transformer(
patch_size=14,
num_layers=32,
num_heads=16,
hidden_dim=1280,
mlp_dim=5120,
weights=weights,
progress=progress,
**kwargs,
)
def interpolate_embeddings(
image_size: int,
patch_size: int,
model_state: "OrderedDict[str, torch.Tensor]",
interpolation_mode: str = "bicubic",
reset_heads: bool = False,
) -> "OrderedDict[str, torch.Tensor]":
"""This function helps interpolate positional embeddings during checkpoint loading,
especially when you want to apply a pre-trained model to images with a different resolution.
Args:
image_size (int): Image size of the new model.
patch_size (int): Patch size of the new model.
model_state (OrderedDict[str, torch.Tensor]): State dict of the pre-trained model.
interpolation_mode (str): The algorithm used for upsampling. Default: bicubic.
reset_heads (bool): If True, the state of the classification heads is not copied. Default: False.
Returns:
OrderedDict[str, torch.Tensor]: A state dict which can be loaded into the new model.
"""
# Shape of pos_embedding is (1, seq_length, hidden_dim)
pos_embedding = model_state["encoder.pos_embedding"]
n, seq_length, hidden_dim = pos_embedding.shape
if n != 1:
raise ValueError(f"Unexpected position embedding shape: {pos_embedding.shape}")
new_seq_length = (image_size // patch_size) ** 2 + 1
# Need to interpolate the weights for the position embedding.
# We do this by reshaping the position embeddings to a 2d grid, performing
# an interpolation in the (h, w) space and then reshaping back to a 1d grid.
if new_seq_length != seq_length:
# The class token embedding shouldn't be interpolated, so we split it up.
seq_length -= 1
new_seq_length -= 1
pos_embedding_token = pos_embedding[:, :1, :]
pos_embedding_img = pos_embedding[:, 1:, :]
# (1, seq_length, hidden_dim) -> (1, hidden_dim, seq_length)
pos_embedding_img = pos_embedding_img.permute(0, 2, 1)
seq_length_1d = int(math.sqrt(seq_length))
if seq_length_1d * seq_length_1d != seq_length:
raise ValueError(
f"seq_length is not a perfect square! Instead got seq_length_1d * seq_length_1d = {seq_length_1d * seq_length_1d } and seq_length = {seq_length}"
)
# (1, hidden_dim, seq_length) -> (1, hidden_dim, seq_l_1d, seq_l_1d)
pos_embedding_img = pos_embedding_img.reshape(1, hidden_dim, seq_length_1d, seq_length_1d)
new_seq_length_1d = image_size // patch_size
# Perform interpolation.
# (1, hidden_dim, seq_l_1d, seq_l_1d) -> (1, hidden_dim, new_seq_l_1d, new_seq_l_1d)
new_pos_embedding_img = nn.functional.interpolate(
pos_embedding_img,
size=new_seq_length_1d,
mode=interpolation_mode,
align_corners=True,
)
# (1, hidden_dim, new_seq_l_1d, new_seq_l_1d) -> (1, hidden_dim, new_seq_length)
new_pos_embedding_img = new_pos_embedding_img.reshape(1, hidden_dim, new_seq_length)
# (1, hidden_dim, new_seq_length) -> (1, new_seq_length, hidden_dim)
new_pos_embedding_img = new_pos_embedding_img.permute(0, 2, 1)
new_pos_embedding = torch.cat([pos_embedding_token, new_pos_embedding_img], dim=1)
model_state["encoder.pos_embedding"] = new_pos_embedding
if reset_heads:
model_state_copy: "OrderedDict[str, torch.Tensor]" = OrderedDict()
for k, v in model_state.items():
if not k.startswith("heads"):
model_state_copy[k] = v
model_state = model_state_copy
return model_state
```
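A hedged usage sketch for the ViT builders and the ``interpolate_embeddings`` helper above (run offline with ``weights=None``; the interpolation is illustrated on the model's own randomly initialised state dict):

```py
# Minimal sketch: classify a dummy image with vit_b_16, then adapt its positional
# embeddings to a 384x384 input resolution via interpolate_embeddings.
import torch
from torchvision.models import vit_b_16
from torchvision.models.vision_transformer import interpolate_embeddings

model = vit_b_16(weights=None)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # expected: torch.Size([1, 1000])

# Resize the (1, 197, 768) positional embedding to (1, 577, 768) for 384x384 inputs
# and load the adapted state dict into a model built for the new resolution.
new_state = interpolate_embeddings(image_size=384, patch_size=16, model_state=model.state_dict())
model_384 = vit_b_16(weights=None, image_size=384)
model_384.load_state_dict(new_state)
```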
|
===================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\__init__.py
ENCODING: utf-8
```py
from ._register_onnx_ops import _register_custom_op
from .boxes import (
batched_nms,
box_area,
box_convert,
box_iou,
clip_boxes_to_image,
complete_box_iou,
distance_box_iou,
generalized_box_iou,
masks_to_boxes,
nms,
remove_small_boxes,
)
from .ciou_loss import complete_box_iou_loss
from .deform_conv import deform_conv2d, DeformConv2d
from .diou_loss import distance_box_iou_loss
from .drop_block import drop_block2d, drop_block3d, DropBlock2d, DropBlock3d
from .feature_pyramid_network import FeaturePyramidNetwork
from .focal_loss import sigmoid_focal_loss
from .giou_loss import generalized_box_iou_loss
from .misc import Conv2dNormActivation, Conv3dNormActivation, FrozenBatchNorm2d, MLP, Permute, SqueezeExcitation
from .poolers import MultiScaleRoIAlign
from .ps_roi_align import ps_roi_align, PSRoIAlign
from .ps_roi_pool import ps_roi_pool, PSRoIPool
from .roi_align import roi_align, RoIAlign
from .roi_pool import roi_pool, RoIPool
from .stochastic_depth import stochastic_depth, StochasticDepth
_register_custom_op()
__all__ = [
"masks_to_boxes",
"deform_conv2d",
"DeformConv2d",
"nms",
"batched_nms",
"remove_small_boxes",
"clip_boxes_to_image",
"box_convert",
"box_area",
"box_iou",
"generalized_box_iou",
"distance_box_iou",
"complete_box_iou",
"roi_align",
"RoIAlign",
"roi_pool",
"RoIPool",
"ps_roi_align",
"PSRoIAlign",
"ps_roi_pool",
"PSRoIPool",
"MultiScaleRoIAlign",
"FeaturePyramidNetwork",
"sigmoid_focal_loss",
"stochastic_depth",
"StochasticDepth",
"FrozenBatchNorm2d",
"Conv2dNormActivation",
"Conv3dNormActivation",
"SqueezeExcitation",
"MLP",
"Permute",
"generalized_box_iou_loss",
"distance_box_iou_loss",
"complete_box_iou_loss",
"drop_block2d",
"DropBlock2d",
"drop_block3d",
"DropBlock3d",
]
```
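A small, hedged sketch exercising a few of the ops re-exported above (assumes a torchvision build with its compiled C++ operators available):

```py
# Minimal sketch: box areas, pairwise IoU and NMS on three hand-written boxes.
import torch
from torchvision import ops

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [20.0, 20.0, 30.0, 30.0]])
scores = torch.tensor([0.9, 0.8, 0.7])

print(ops.box_area(boxes))               # tensor([100., 100., 100.])
print(ops.box_iou(boxes, boxes).shape)   # torch.Size([3, 3])
keep = ops.nms(boxes, scores, iou_threshold=0.5)
print(keep)                              # the second box (IoU ~0.68 with the first) is suppressed
```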
|
=======================================================================================================================
SOURCE CODE FILE: _box_convert.py
LINES: 1
SIZE: 2.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\_box_convert.py
ENCODING: utf-8
```py
import torch
from torch import Tensor
def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format.
(cx, cy) refers to center of bounding box
(w, h) are width and height of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
# We need to change all 4 coordinates, so temporary variables are needed.
cx, cy, w, h = boxes.unbind(-1)
x1 = cx - 0.5 * w
y1 = cy - 0.5 * h
x2 = cx + 0.5 * w
y2 = cy + 0.5 * h
boxes = torch.stack((x1, y1, x2, y2), dim=-1)
return boxes
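# Example: (cx, cy, w, h) = (5., 5., 4., 2.) maps to (x1, y1, x2, y2) = (3., 4., 7., 6.).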
def _box_xyxy_to_cxcywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (cx, cy, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = torch.stack((cx, cy, w, h), dim=-1)
return boxes
def _box_xywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x, y, w, h) format to (x1, y1, x2, y2) format.
(x, y) refers to top left of bounding box.
(w, h) refers to width and height of box.
Args:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
x, y, w, h = boxes.unbind(-1)
boxes = torch.stack([x, y, x + w, y + h], dim=-1)
return boxes
def _box_xyxy_to_xywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (x, y, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
w = x2 - x1  # box width
h = y2 - y1  # box height
boxes = torch.stack((x1, y1, w, h), dim=-1)
return boxes
```
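These private helpers back the public ``torchvision.ops.box_convert`` wrapper; a hedged round-trip sketch:

```py
# Minimal sketch: convert a box between formats and back using the public API.
import torch
from torchvision.ops import box_convert

xyxy = torch.tensor([[3.0, 4.0, 7.0, 6.0]])
cxcywh = box_convert(xyxy, in_fmt="xyxy", out_fmt="cxcywh")  # tensor([[5., 5., 4., 2.]])
back = box_convert(cxcywh, in_fmt="cxcywh", out_fmt="xyxy")  # round-trips to the original xyxy box
print(cxcywh, back)
```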
|
=============================================================================================================================
SOURCE CODE FILE: _register_onnx_ops.py
LINES: 1
SIZE: 4.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\_register_onnx_ops.py
ENCODING: utf-8
```py
import sys
import warnings
import torch
from torch.onnx import symbolic_opset11 as opset11
from torch.onnx.symbolic_helper import parse_args
_ONNX_OPSET_VERSION_11 = 11
_ONNX_OPSET_VERSION_16 = 16
BASE_ONNX_OPSET_VERSION = _ONNX_OPSET_VERSION_11
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = opset11.unsqueeze(g, boxes, 0)
scores = opset11.unsqueeze(g, opset11.unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
# Cast boxes and scores to float32 in case they are float64 inputs
nms_out = g.op(
"NonMaxSuppression",
g.op("Cast", boxes, to_i=torch.onnx.TensorProtoDataType.FLOAT),
g.op("Cast", scores, to_i=torch.onnx.TensorProtoDataType.FLOAT),
max_output_per_class,
iou_threshold,
)
return opset11.squeeze(
g, opset11.select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1
)
def _process_batch_indices_for_roi_align(g, rois):
indices = opset11.squeeze(
g, opset11.select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1
)
return g.op("Cast", indices, to_i=torch.onnx.TensorProtoDataType.INT64)
def _process_rois_for_roi_align(g, rois):
return opset11.select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
if sampling_ratio < 0:
warnings.warn(
"ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
"The model will be exported with a sampling_ratio of 0."
)
sampling_ratio = 0
return sampling_ratio
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
if aligned:
warnings.warn(
"ROIAlign with aligned=True is only supported in opset >= 16. "
"Please export with opset 16 or higher, or use aligned=False."
)
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
coordinate_transformation_mode_s=coordinate_transformation_mode,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
def _register_custom_op():
torch.onnx.register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _ONNX_OPSET_VERSION_11)
torch.onnx.register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _ONNX_OPSET_VERSION_11)
torch.onnx.register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _ONNX_OPSET_VERSION_16)
torch.onnx.register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _ONNX_OPSET_VERSION_11)
```
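A hedged export sketch showing how the symbolics registered above get picked up during tracing-based ONNX export; details vary across torch and ONNX versions, and the ``onnx`` package must be installed:

```py
# Sketch only: trace-export a tiny module that calls torchvision.ops.nms so that the
# torchvision::nms symbolic registered above is used when building the ONNX graph.
import torch
from torchvision.ops import nms

class NMSWrapper(torch.nn.Module):
    def forward(self, boxes, scores):
        return nms(boxes, scores, iou_threshold=0.5)

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
torch.onnx.export(NMSWrapper(), (boxes, scores), "nms.onnx", opset_version=11)
```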
|
=================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 3.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\_utils.py
ENCODING: utf-8
```py
from typing import List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
concat_boxes = _cat([b for b in boxes], dim=0)
temp = []
for i, b in enumerate(boxes):
temp.append(torch.full_like(b[:, :1], i))
ids = _cat(temp, dim=0)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
if isinstance(boxes, (list, tuple)):
for _tensor in boxes:
torch._assert(
_tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
)
elif isinstance(boxes, torch.Tensor):
torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
else:
torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
return
def split_normalization_params(
model: nn.Module, norm_classes: Optional[List[type]] = None
) -> Tuple[List[Tensor], List[Tensor]]:
# Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501
if not norm_classes:
norm_classes = [
nn.modules.batchnorm._BatchNorm,
nn.LayerNorm,
nn.GroupNorm,
nn.modules.instancenorm._InstanceNorm,
nn.LocalResponseNorm,
]
for t in norm_classes:
if not issubclass(t, nn.Module):
raise ValueError(f"Class {t} is not a subclass of nn.Module.")
classes = tuple(norm_classes)
norm_params = []
other_params = []
for module in model.modules():
if next(module.children(), None):
other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad)
elif isinstance(module, classes):
norm_params.extend(p for p in module.parameters() if p.requires_grad)
else:
other_params.extend(p for p in module.parameters() if p.requires_grad)
return norm_params, other_params
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def _upcast_non_float(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.dtype not in (torch.float32, torch.float64):
return t.float()
return t
def _loss_inter_union(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsctk = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
return intsctk, unionk
```
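A short, hedged usage sketch of the private helpers above (box values are illustrative): `convert_boxes_to_roi_format` flattens a per-image list of `[K_i, 4]` boxes into the single `[K, 5]` tensor (image index plus coordinates) that the ROI ops expect, and `split_normalization_params` separates normalization-layer parameters, e.g. to exclude them from weight decay.

```py
import torch
from torchvision.ops._utils import check_roi_boxes_shape, convert_boxes_to_roi_format, split_normalization_params

# two images: 2 boxes in the first, 1 in the second (made-up coordinates)
boxes_per_image = [
    torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]]),
    torch.tensor([[2.0, 2.0, 8.0, 8.0]]),
]
check_roi_boxes_shape(boxes_per_image)               # validates List[Tensor[K, 4]]
rois = convert_boxes_to_roi_format(boxes_per_image)
print(rois.shape)   # torch.Size([3, 5]); column 0 is the image index
print(rois[:, 0])   # tensor([0., 0., 1.])

# split parameters of a tiny model into norm-layer params vs. everything else
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
norm_params, other_params = split_normalization_params(model)
print(len(norm_params), len(other_params))           # 2 2
```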
|
================================================================================================================
SOURCE CODE FILE: boxes.py
LINES: 1
SIZE: 16.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\boxes.py
ENCODING: utf-8
```py
from typing import Tuple
import torch
import torchvision
from torch import Tensor
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._box_convert import _box_cxcywh_to_xyxy, _box_xywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xyxy_to_xywh
from ._utils import _upcast
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
"""
Performs non-maximum suppression (NMS) on the boxes according
to their intersection-over-union (IoU).
NMS iteratively removes lower scoring boxes which have an
IoU greater than ``iou_threshold`` with another (higher scoring)
box.
If multiple boxes have the exact same score and satisfy the IoU
criterion with respect to a reference box, the selected box is
not guaranteed to be the same between CPU and GPU. This is similar
to the behavior of argsort in PyTorch when repeated values are present.
Args:
        boxes (Tensor[N, 4]): boxes to perform NMS on. They
are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
``0 <= y1 < y2``.
scores (Tensor[N]): scores for each one of the boxes
iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold
Returns:
Tensor: int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(nms)
_assert_has_ops()
return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
def batched_nms(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
"""
Performs non-maximum suppression in a batched fashion.
    Each index value corresponds to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]): boxes where NMS will be performed. They
are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
``0 <= y1 < y2``.
scores (Tensor[N]): scores for each one of the boxes
idxs (Tensor[N]): indices of the categories for each one of the boxes.
iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold
Returns:
Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
in decreasing order of scores
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(batched_nms)
# Benchmarks that drove the following thresholds are at
# https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
# and https://github.com/pytorch/vision/pull/8925
if boxes.numel() > (4000 if boxes.device.type == "cpu" else 100_000) and not torchvision._is_tracing():
return _batched_nms_vanilla(boxes, scores, idxs, iou_threshold)
else:
return _batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold)
@torch.jit._script_if_tracing
def _batched_nms_coordinate_trick(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
# strategy: in order to perform NMS independently per class,
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
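    # Worked example (illustrative numbers): if max_coordinate is 100, the per-class
    # stride below is 101, so class-0 boxes stay in place, class-1 boxes shift by 101,
    # class-2 boxes by 202, and so on; a single class-agnostic nms() call then never
    # sees overlapping boxes from different classes.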
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
keep = nms(boxes_for_nms, scores, iou_threshold)
return keep
@torch.jit._script_if_tracing
def _batched_nms_vanilla(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
# Based on Detectron2 implementation, just manually call nms() on each class independently
keep_mask = torch.zeros_like(scores, dtype=torch.bool)
for class_id in torch.unique(idxs):
curr_indices = torch.where(idxs == class_id)[0]
curr_keep_indices = nms(boxes[curr_indices], scores[curr_indices], iou_threshold)
keep_mask[curr_indices[curr_keep_indices]] = True
keep_indices = torch.where(keep_mask)[0]
return keep_indices[scores[keep_indices].sort(descending=True)[1]]
def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
"""
Remove every box from ``boxes`` which contains at least one side length
that is smaller than ``min_size``.
.. note::
For sanitizing a :class:`~torchvision.tv_tensors.BoundingBoxes` object, consider using
the transform :func:`~torchvision.transforms.v2.SanitizeBoundingBoxes` instead.
Args:
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
with ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
min_size (float): minimum size
Returns:
        Tensor[K]: indices of the boxes that have both sides
        at least as large as ``min_size``
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(remove_small_boxes)
ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
keep = (ws >= min_size) & (hs >= min_size)
keep = torch.where(keep)[0]
return keep
def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
"""
Clip boxes so that they lie inside an image of size ``size``.
.. note::
For clipping a :class:`~torchvision.tv_tensors.BoundingBoxes` object, consider using
the transform :func:`~torchvision.transforms.v2.ClampBoundingBoxes` instead.
Args:
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
with ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
size (Tuple[height, width]): size of the image
Returns:
Tensor[N, 4]: clipped boxes
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(clip_boxes_to_image)
dim = boxes.dim()
boxes_x = boxes[..., 0::2]
boxes_y = boxes[..., 1::2]
height, width = size
if torchvision._is_tracing():
boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))
boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))
boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))
boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))
else:
boxes_x = boxes_x.clamp(min=0, max=width)
boxes_y = boxes_y.clamp(min=0, max=height)
clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)
return clipped_boxes.reshape(boxes.shape)
def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
"""
Converts :class:`torch.Tensor` boxes from a given ``in_fmt`` to ``out_fmt``.
.. note::
For converting a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.BoundingBoxes` object
between different formats,
consider using :func:`~torchvision.transforms.v2.functional.convert_bounding_box_format` instead.
Or see the corresponding transform :func:`~torchvision.transforms.v2.ConvertBoundingBoxFormat`.
Supported ``in_fmt`` and ``out_fmt`` strings are:
``'xyxy'``: boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
This is the format that torchvision utilities expect.
    ``'xywh'``: boxes are represented via corner, width and height, x1, y1 being top left, w, h being width and height.
``'cxcywh'``: boxes are represented via centre, width and height, cx, cy being center of box, w, h
being width and height.
Args:
boxes (Tensor[N, 4]): boxes which will be converted.
in_fmt (str): Input format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh'].
out_fmt (str): Output format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh']
Returns:
Tensor[N, 4]: Boxes into converted format.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(box_convert)
allowed_fmts = ("xyxy", "xywh", "cxcywh")
if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
if in_fmt == out_fmt:
return boxes.clone()
if in_fmt != "xyxy" and out_fmt != "xyxy":
        # convert to xyxy and change in_fmt to xyxy
if in_fmt == "xywh":
boxes = _box_xywh_to_xyxy(boxes)
elif in_fmt == "cxcywh":
boxes = _box_cxcywh_to_xyxy(boxes)
in_fmt = "xyxy"
if in_fmt == "xyxy":
if out_fmt == "xywh":
boxes = _box_xyxy_to_xywh(boxes)
elif out_fmt == "cxcywh":
boxes = _box_xyxy_to_cxcywh(boxes)
elif out_fmt == "xyxy":
if in_fmt == "xywh":
boxes = _box_xywh_to_xyxy(boxes)
elif in_fmt == "cxcywh":
boxes = _box_cxcywh_to_xyxy(boxes)
return boxes
def box_area(boxes: Tensor) -> Tensor:
"""
Computes the area of a set of bounding boxes, which are specified by their
(x1, y1, x2, y2) coordinates.
Args:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2) format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Returns:
Tensor[N]: the area for each box
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(box_area)
boxes = _upcast(boxes)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def _box_inter_union(boxes1: Tensor, boxes2: Tensor) -> Tuple[Tensor, Tensor]:
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = _upcast(rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
return inter, union
def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
"""
Return intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(box_iou)
inter, union = _box_inter_union(boxes1, boxes2)
iou = inter / union
return iou
# Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py
def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
"""
Return generalized intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values
for every element in boxes1 and boxes2
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(generalized_box_iou)
inter, union = _box_inter_union(boxes1, boxes2)
iou = inter / union
lti = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
whi = _upcast(rbi - lti).clamp(min=0) # [N,M,2]
areai = whi[:, :, 0] * whi[:, :, 1]
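    # GIoU = IoU - (area of the enclosing box not covered by the union) / (area of the enclosing box)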
return iou - (areai - union) / areai
def complete_box_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tensor:
"""
Return complete intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values
for every element in boxes1 and boxes2
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(complete_box_iou)
boxes1 = _upcast(boxes1)
boxes2 = _upcast(boxes2)
diou, iou = _box_diou_iou(boxes1, boxes2, eps)
w_pred = boxes1[:, None, 2] - boxes1[:, None, 0]
h_pred = boxes1[:, None, 3] - boxes1[:, None, 1]
w_gt = boxes2[:, 2] - boxes2[:, 0]
h_gt = boxes2[:, 3] - boxes2[:, 1]
v = (4 / (torch.pi**2)) * torch.pow(torch.atan(w_pred / h_pred) - torch.atan(w_gt / h_gt), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
return diou - alpha * v
def distance_box_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tensor:
"""
Return distance intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values
for every element in boxes1 and boxes2
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(distance_box_iou)
boxes1 = _upcast(boxes1)
boxes2 = _upcast(boxes2)
diou, _ = _box_diou_iou(boxes1, boxes2, eps=eps)
return diou
def _box_diou_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tuple[Tensor, Tensor]:
iou = box_iou(boxes1, boxes2)
lti = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
whi = _upcast(rbi - lti).clamp(min=0) # [N,M,2]
diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps
# centers of boxes
x_p = (boxes1[:, 0] + boxes1[:, 2]) / 2
y_p = (boxes1[:, 1] + boxes1[:, 3]) / 2
x_g = (boxes2[:, 0] + boxes2[:, 2]) / 2
y_g = (boxes2[:, 1] + boxes2[:, 3]) / 2
# The distance between boxes' centers squared.
centers_distance_squared = (_upcast((x_p[:, None] - x_g[None, :])) ** 2) + (
_upcast((y_p[:, None] - y_g[None, :])) ** 2
)
# The distance IoU is the IoU penalized by a normalized
# distance between boxes' centers squared.
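    # DIoU = IoU - ||c_pred - c_gt||^2 / d^2, with d the diagonal of the smallest enclosing box.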
return iou - (centers_distance_squared / diagonal_distance_squared), iou
def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
"""
Compute the bounding boxes around the provided masks.
Returns a [N, 4] tensor containing bounding boxes. The boxes are in ``(x1, y1, x2, y2)`` format with
``0 <= x1 <= x2`` and ``0 <= y1 <= y2``.
.. warning::
In most cases the output will guarantee ``x1 < x2`` and ``y1 < y2``. But
if the input is degenerate, e.g. if a mask is a single row or a single
column, then the output may have x1 = x2 or y1 = y2.
Args:
masks (Tensor[N, H, W]): masks to transform where N is the number of masks
and (H, W) are the spatial dimensions.
Returns:
Tensor[N, 4]: bounding boxes
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(masks_to_boxes)
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device, dtype=torch.float)
n = masks.shape[0]
bounding_boxes = torch.zeros((n, 4), device=masks.device, dtype=torch.float)
for index, mask in enumerate(masks):
y, x = torch.where(mask != 0)
bounding_boxes[index, 0] = torch.min(x)
bounding_boxes[index, 1] = torch.min(y)
bounding_boxes[index, 2] = torch.max(x)
bounding_boxes[index, 3] = torch.max(y)
return bounding_boxes
```
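A brief, hedged usage sketch of the public helpers above (all coordinates and scores are illustrative):

```py
import torch
from torchvision.ops import batched_nms, box_convert, box_iou

# xywh -> xyxy conversion
xywh = torch.tensor([[10.0, 10.0, 20.0, 30.0]])          # x, y, width, height
xyxy = box_convert(xywh, in_fmt="xywh", out_fmt="xyxy")  # tensor([[10., 10., 30., 40.]])

# pairwise IoU between two small sets of boxes
a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
b = torch.tensor([[5.0, 5.0, 15.0, 15.0], [20.0, 20.0, 30.0, 30.0]])
print(box_iou(a, b))  # shape [1, 2]; the second entry is 0 (no overlap)

# class-aware NMS: boxes assigned different idxs never suppress each other
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
idxs = torch.tensor([0, 1])
print(batched_nms(boxes, scores, idxs, iou_threshold=0.5))  # tensor([0, 1]): both kept
```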
|
====================================================================================================================
SOURCE CODE FILE: ciou_loss.py
LINES: 2
SIZE: 2.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\ciou_loss.py
ENCODING: utf-8
```py
import torch
from ..utils import _log_api_usage_once
from ._utils import _upcast_non_float
from .diou_loss import _diou_iou_loss
def complete_box_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Gradient-friendly IoU loss with an additional penalty that is non-zero when the
boxes do not overlap. This loss function considers important geometrical
factors such as overlap area, normalized central point distance and aspect ratio.
This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``, and the two boxes should have the
    same dimensions.
Args:
boxes1 : (Tensor[N, 4] or Tensor[4]) first set of boxes
boxes2 : (Tensor[N, 4] or Tensor[4]) second set of boxes
reduction : (string, optional) Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps : (float): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor: Loss tensor with the reduction option applied.
Reference:
Zhaohui Zheng et al.: Complete Intersection over Union Loss:
https://arxiv.org/abs/1911.08287
"""
# Original Implementation from https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(complete_box_iou_loss)
boxes1 = _upcast_non_float(boxes1)
boxes2 = _upcast_non_float(boxes2)
diou_loss, iou = _diou_iou_loss(boxes1, boxes2)
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (torch.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
loss = diou_loss + alpha * v
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
```
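A minimal, hedged sketch of calling the loss (coordinates are illustrative; identical boxes contribute zero loss):

```py
import torch
from torchvision.ops import complete_box_iou_loss

pred = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]], requires_grad=True)
target = torch.tensor([[0.0, 0.0, 10.0, 10.0], [6.0, 6.0, 14.0, 14.0]])

loss = complete_box_iou_loss(pred, target, reduction="mean")
loss.backward()      # the loss is differentiable w.r.t. the predicted boxes
print(loss.item())   # small positive value; the first (exact) pair contributes ~0
```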
|