#!/usr/bin/env python3
# This script is for building AARCH64 wheels using AWS EC2 instances.
# To generate binaries for a release follow these steps:
# 1. Update the mappings for each of the domain libraries by adding a new row to its table, e.g.: "v1.11.0": ("0.11.0", "rc1"),
# 2. Run the script for each of the supported Python versions, passing the required RC tag (for example v1.11.0-rc3):
# build_aarch64_wheel.py --key-name <YourPemKey> --use-docker --python-version 3.8 --branch <RCtag>
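# A concrete invocation might look like the following (illustrative values only, substitute
# your own key name and RC tag):
#   build_aarch64_wheel.py --key-name my-pem-key --use-docker --python-version 3.10 --branch v2.0.1-rc2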
import boto3
import os
import subprocess
import sys
import time
from typing import Dict, List, Optional, Tuple, Union
# AMI images for us-east-1, change the following based on your ~/.aws/config
os_amis = {
'ubuntu18_04': "ami-078eece1d8119409f", # login_name: ubuntu
'ubuntu20_04': "ami-052eac90edaa9d08f", # login_name: ubuntu
'ubuntu22_04': "ami-0c6c29c5125214c77", # login_name: ubuntu
'redhat8': "ami-0698b90665a2ddcf1", # login_name: ec2-user
}
ubuntu18_04_ami = os_amis['ubuntu18_04']
def compute_keyfile_path(key_name: Optional[str] = None) -> Tuple[str, str]:
if key_name is None:
key_name = os.getenv("AWS_KEY_NAME")
if key_name is None:
return os.getenv("SSH_KEY_PATH", ""), ""
homedir_path = os.path.expanduser("~")
default_path = os.path.join(homedir_path, ".ssh", f"{key_name}.pem")
return os.getenv("SSH_KEY_PATH", default_path), key_name
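# For example, with AWS_KEY_NAME=my-key and SSH_KEY_PATH unset this returns
# ("<home>/.ssh/my-key.pem", "my-key"); SSH_KEY_PATH, when set, overrides the keyfile path.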
ec2 = boto3.resource("ec2")
def ec2_get_instances(filter_name, filter_value):
return ec2.instances.filter(Filters=[{'Name': filter_name, 'Values': [filter_value]}])
def ec2_instances_of_type(instance_type='t4g.2xlarge'):
return ec2_get_instances('instance-type', instance_type)
def ec2_instances_by_id(instance_id):
rc = list(ec2_get_instances('instance-id', instance_id))
return rc[0] if len(rc) > 0 else None
def start_instance(key_name, ami=ubuntu18_04_ami, instance_type='t4g.2xlarge'):
inst = ec2.create_instances(ImageId=ami,
InstanceType=instance_type,
SecurityGroups=['ssh-allworld'],
KeyName=key_name,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': 50,
'VolumeType': 'standard'
}
}
])[0]
print(f'Create instance {inst.id}')
inst.wait_until_running()
running_inst = ec2_instances_by_id(inst.id)
print(f'Instance started at {running_inst.public_dns_name}')
return running_inst
class RemoteHost:
addr: str
keyfile_path: str
login_name: str
container_id: Optional[str] = None
ami: Optional[str] = None
def __init__(self, addr: str, keyfile_path: str, login_name: str = 'ubuntu'):
self.addr = addr
self.keyfile_path = keyfile_path
self.login_name = login_name
def _gen_ssh_prefix(self) -> List[str]:
return ["ssh", "-o", "StrictHostKeyChecking=no", "-i", self.keyfile_path,
f"{self.login_name}@{self.addr}", "--"]
@staticmethod
def _split_cmd(args: Union[str, List[str]]) -> List[str]:
return args.split() if isinstance(args, str) else args
def run_ssh_cmd(self, args: Union[str, List[str]]) -> None:
subprocess.check_call(self._gen_ssh_prefix() + self._split_cmd(args))
def check_ssh_output(self, args: Union[str, List[str]]) -> str:
return subprocess.check_output(self._gen_ssh_prefix() + self._split_cmd(args)).decode("utf-8")
def scp_upload_file(self, local_file: str, remote_file: str) -> None:
subprocess.check_call(["scp", "-i", self.keyfile_path, local_file,
f"{self.login_name}@{self.addr}:{remote_file}"])
def scp_download_file(self, remote_file: str, local_file: Optional[str] = None) -> None:
if local_file is None:
local_file = "."
subprocess.check_call(["scp", "-i", self.keyfile_path,
f"{self.login_name}@{self.addr}:{remote_file}", local_file])
def start_docker(self, image="quay.io/pypa/manylinux2014_aarch64:latest") -> None:
self.run_ssh_cmd("sudo apt-get install -y docker.io")
self.run_ssh_cmd(f"sudo usermod -a -G docker {self.login_name}")
self.run_ssh_cmd("sudo service docker start")
self.run_ssh_cmd(f"docker pull {image}")
self.container_id = self.check_ssh_output(f"docker run -t -d -w /root {image}").strip()
def using_docker(self) -> bool:
return self.container_id is not None
def run_cmd(self, args: Union[str, List[str]]) -> None:
if not self.using_docker():
return self.run_ssh_cmd(args)
assert self.container_id is not None
docker_cmd = self._gen_ssh_prefix() + ['docker', 'exec', '-i', self.container_id, 'bash']
p = subprocess.Popen(docker_cmd, stdin=subprocess.PIPE)
p.communicate(input=" ".join(["source .bashrc && "] + self._split_cmd(args)).encode("utf-8"))
rc = p.wait()
if rc != 0:
raise subprocess.CalledProcessError(rc, docker_cmd)
def check_output(self, args: Union[str, List[str]]) -> str:
if not self.using_docker():
return self.check_ssh_output(args)
assert self.container_id is not None
docker_cmd = self._gen_ssh_prefix() + ['docker', 'exec', '-i', self.container_id, 'bash']
p = subprocess.Popen(docker_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(out, err) = p.communicate(input=" ".join(["source .bashrc && "] + self._split_cmd(args)).encode("utf-8"))
rc = p.wait()
if rc != 0:
raise subprocess.CalledProcessError(rc, docker_cmd, output=out, stderr=err)
return out.decode("utf-8")
def upload_file(self, local_file: str, remote_file: str) -> None:
if not self.using_docker():
return self.scp_upload_file(local_file, remote_file)
tmp_file = os.path.join("/tmp", os.path.basename(local_file))
self.scp_upload_file(local_file, tmp_file)
self.run_ssh_cmd(["docker", "cp", tmp_file, f"{self.container_id}:/root/{remote_file}"])
self.run_ssh_cmd(["rm", tmp_file])
def download_file(self, remote_file: str, local_file: Optional[str] = None) -> None:
if not self.using_docker():
return self.scp_download_file(remote_file, local_file)
tmp_file = os.path.join("/tmp", os.path.basename(remote_file))
self.run_ssh_cmd(["docker", "cp", f"{self.container_id}:/root/{remote_file}", tmp_file])
self.scp_download_file(tmp_file, local_file)
self.run_ssh_cmd(["rm", tmp_file])
def download_wheel(self, remote_file: str, local_file: Optional[str] = None) -> None:
if self.using_docker() and local_file is None:
basename = os.path.basename(remote_file)
local_file = basename.replace("-linux_aarch64.whl", "-manylinux2014_aarch64.whl")
self.download_file(remote_file, local_file)
def list_dir(self, path: str) -> List[str]:
return self.check_output(["ls", "-1", path]).split("\n")
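# Illustrative RemoteHost usage (placeholder address and key path):
#   host = RemoteHost("ec2-203-0-113-1.compute-1.amazonaws.com", "~/.ssh/my-key.pem")
#   host.start_docker()       # subsequent run_cmd/check_output calls go through `docker exec`
#   host.run_cmd("uname -m")  # prints aarch64 on a Graviton (t4g) instance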
def wait_for_connection(addr, port, timeout=15, attempt_cnt=5):
import socket
for i in range(attempt_cnt):
try:
with socket.create_connection((addr, port), timeout=timeout):
return
except (ConnectionRefusedError, socket.timeout):
if i == attempt_cnt - 1:
raise
time.sleep(timeout)
def update_apt_repo(host: RemoteHost) -> None:
time.sleep(5)
host.run_cmd("sudo systemctl stop apt-daily.service || true")
host.run_cmd("sudo systemctl stop unattended-upgrades.service || true")
host.run_cmd("while systemctl is-active --quiet apt-daily.service; do sleep 1; done")
host.run_cmd("while systemctl is-active --quiet unattended-upgrades.service; do sleep 1; done")
host.run_cmd("sudo apt-get update")
time.sleep(3)
host.run_cmd("sudo apt-get update")
def install_condaforge(host: RemoteHost,
suffix: str = "latest/download/Miniforge3-Linux-aarch64.sh") -> None:
print('Install conda-forge')
host.run_cmd(f"curl -OL https://github.com/conda-forge/miniforge/releases/{suffix}")
host.run_cmd(f"sh -f {os.path.basename(suffix)} -b")
host.run_cmd(f"rm -f {os.path.basename(suffix)}")
if host.using_docker():
host.run_cmd("echo 'PATH=$HOME/miniforge3/bin:$PATH'>>.bashrc")
else:
host.run_cmd(['sed', '-i', '\'/^# If not running interactively.*/i PATH=$HOME/miniforge3/bin:$PATH\'', '.bashrc'])
def install_condaforge_python(host: RemoteHost, python_version="3.8") -> None:
if python_version == "3.6":
# Python 3.6 is EOL and not compatible with conda 4.11
install_condaforge(host, suffix="download/4.10.3-10/Miniforge3-4.10.3-10-Linux-aarch64.sh")
host.run_cmd(f"conda install -y python={python_version} numpy pyyaml")
else:
install_condaforge(host, suffix="download/4.11.0-4/Miniforge3-4.11.0-4-Linux-aarch64.sh")
# Pytorch-1.10 or older are not compatible with setuptools=59.6 or newer
host.run_cmd(f"conda install -y python={python_version} numpy pyyaml setuptools>=59.5.0")
def build_OpenBLAS(host: RemoteHost, git_clone_flags: str = "") -> None:
print('Building OpenBLAS')
host.run_cmd(f"git clone https://github.com/xianyi/OpenBLAS -b v0.3.20 {git_clone_flags}")
make_flags = "NUM_THREADS=64 USE_OPENMP=1 NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=ARMV8"
host.run_cmd(f"pushd OpenBLAS && make {make_flags} -j8 && sudo make {make_flags} install && popd && rm -rf OpenBLAS")
def build_ArmComputeLibrary(host: RemoteHost, git_clone_flags: str = "") -> None:
print('Building Arm Compute Library')
acl_build_flags="debug=0 neon=1 opencl=0 os=linux openmp=1 cppthreads=0 arch=armv8a multi_isa=1 build=native"
host.run_cmd(f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v23.05.1 {git_clone_flags}")
host.run_cmd(f"cd ComputeLibrary && scons Werror=1 -j8 {acl_build_flags}")
def embed_libgomp(host: RemoteHost, use_conda, wheel_name) -> None:
host.run_cmd("pip3 install auditwheel")
host.run_cmd("conda install -y patchelf" if use_conda else "sudo apt-get install -y patchelf")
from tempfile import NamedTemporaryFile
with NamedTemporaryFile() as tmp:
tmp.write(embed_library_script.encode('utf-8'))
tmp.flush()
host.upload_file(tmp.name, "embed_library.py")
print('Embedding libgomp into wheel')
if host.using_docker():
host.run_cmd(f"python3 embed_library.py {wheel_name} --update-tag")
else:
host.run_cmd(f"python3 embed_library.py {wheel_name}")
def checkout_repo(host: RemoteHost, *,
branch: str = "main",
url: str,
git_clone_flags: str,
mapping: Dict[str, Tuple[str, str]]) -> Optional[str]:
for prefix in mapping:
if not branch.startswith(prefix):
continue
tag = f"v{mapping[prefix][0]}-{mapping[prefix][1]}"
host.run_cmd(f"git clone {url} -b {tag} {git_clone_flags}")
return mapping[prefix][0]
# Map master to main
if branch == "master" and url.rsplit("/")[-1] in ['vision', 'text', 'audio', 'data']:
branch = "main"
host.run_cmd(f"git clone {url} -b {branch} {git_clone_flags}")
return None
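# For example, branch "v1.11.0-rc3" with the mapping entry "v1.11.0": ("0.12.0", "rc1") clones
# tag v0.12.0-rc1 and returns "0.12.0"; unmapped branches are cloned as-is and None is returned.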
def build_torchvision(host: RemoteHost, *,
branch: str = "main",
use_conda: bool = True,
git_clone_flags: str,
run_smoke_tests: bool = True) -> str:
print('Checking out TorchVision repo')
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/vision",
git_clone_flags=git_clone_flags,
mapping={
"v1.7.1": ("0.8.2", "rc2"),
"v1.8.0": ("0.9.0", "rc3"),
"v1.8.1": ("0.9.1", "rc1"),
"v1.9.0": ("0.10.0", "rc1"),
"v1.10.0": ("0.11.1", "rc1"),
"v1.10.1": ("0.11.2", "rc1"),
"v1.10.2": ("0.11.3", "rc1"),
"v1.11.0": ("0.12.0", "rc1"),
"v1.12.0": ("0.13.0", "rc4"),
"v1.12.1": ("0.13.1", "rc6"),
"v1.13.0": ("0.14.0", "rc4"),
"v1.13.1": ("0.14.1", "rc2"),
"v2.0.0": ("0.15.1", "rc2"),
"v2.0.1": ("0.15.2", "rc2"),
})
print("Building TorchVision wheel")
# Please note libpng and libjpeg are required to build the image.so extension
if use_conda:
host.run_cmd("conda install -y libpng jpeg")
# Remove .so files to force static linking
host.run_cmd("rm miniforge3/lib/libpng.so miniforge3/lib/libpng16.so miniforge3/lib/libjpeg.so")
# And patch setup.py to include libz dependency for libpng
host.run_cmd(['sed -i -e \'s/image_link_flags\.append("png")/image_link_flags += ["png", "z"]/\' vision/setup.py'])
build_vars = ""
if branch == "nightly":
version = host.check_output(["if [ -f vision/version.txt ]; then cat vision/version.txt; fi"]).strip()
if len(version) == 0:
# In older revisions, version was embedded in setup.py
version = host.check_output(["grep", "\"version = '\"", "vision/setup.py"]).strip().split("'")[1][:-2]
build_date = host.check_output("cd vision && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd vision && {build_vars} python3 setup.py bdist_wheel")
vision_wheel_name = host.list_dir("vision/dist")[0]
embed_libgomp(host, use_conda, os.path.join('vision', 'dist', vision_wheel_name))
print('Copying TorchVision wheel')
host.download_wheel(os.path.join('vision', 'dist', vision_wheel_name))
if run_smoke_tests:
host.run_cmd(f"pip3 install {os.path.join('vision', 'dist', vision_wheel_name)}")
host.run_cmd("python3 vision/test/smoke_test.py")
print("Delete vision checkout")
host.run_cmd("rm -rf vision")
return vision_wheel_name
def build_torchdata(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchData repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/data",
git_clone_flags=git_clone_flags,
mapping={
"v1.13.1": ("0.5.1", ""),
"v2.0.0": ("0.6.0", "rc5"),
"v2.0.1": ("0.6.1", "rc1"),
})
print('Building TorchData wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["if [ -f data/version.txt ]; then cat data/version.txt; fi"]).strip()
build_date = host.check_output("cd data && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd data && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("data/dist")[0]
embed_libgomp(host, use_conda, os.path.join('data', 'dist', wheel_name))
print('Copying TorchData wheel')
host.download_wheel(os.path.join('data', 'dist', wheel_name))
return wheel_name
def build_torchtext(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchText repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/text",
git_clone_flags=git_clone_flags,
mapping={
"v1.9.0": ("0.10.0", "rc1"),
"v1.10.0": ("0.11.0", "rc2"),
"v1.10.1": ("0.11.1", "rc1"),
"v1.10.2": ("0.11.2", "rc1"),
"v1.11.0": ("0.12.0", "rc1"),
"v1.12.0": ("0.13.0", "rc2"),
"v1.12.1": ("0.13.1", "rc5"),
"v1.13.0": ("0.14.0", "rc3"),
"v1.13.1": ("0.14.1", "rc1"),
"v2.0.0": ("0.15.1", "rc2"),
"v2.0.1": ("0.15.2", "rc2"),
})
print('Building TorchText wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["if [ -f text/version.txt ]; then cat text/version.txt; fi"]).strip()
build_date = host.check_output("cd text && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd text && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("text/dist")[0]
embed_libgomp(host, use_conda, os.path.join('text', 'dist', wheel_name))
print('Copying TorchText wheel')
host.download_wheel(os.path.join('text', 'dist', wheel_name))
return wheel_name
def build_torchaudio(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchAudio repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/audio",
git_clone_flags=git_clone_flags,
mapping={
"v1.9.0": ("0.9.0", "rc2"),
"v1.10.0": ("0.10.0", "rc5"),
"v1.10.1": ("0.10.1", "rc1"),
"v1.10.2": ("0.10.2", "rc1"),
"v1.11.0": ("0.11.0", "rc1"),
"v1.12.0": ("0.12.0", "rc3"),
"v1.12.1": ("0.12.1", "rc5"),
"v1.13.0": ("0.13.0", "rc4"),
"v1.13.1": ("0.13.1", "rc2"),
"v2.0.0": ("2.0.1", "rc3"),
"v2.0.1": ("2.0.2", "rc2"),
})
print('Building TorchAudio wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["grep", "\"version = '\"", "audio/setup.py"]).strip().split("'")[1][:-2]
build_date = host.check_output("cd audio && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \
&& ./packaging/ffmpeg/build.sh \
&& {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("audio/dist")[0]
embed_libgomp(host, use_conda, os.path.join('audio', 'dist', wheel_name))
print('Copying TorchAudio wheel')
host.download_wheel(os.path.join('audio', 'dist', wheel_name))
return wheel_name
def configure_system(host: RemoteHost, *,
compiler: str = "gcc-8",
use_conda: bool = True,
python_version: str = "3.8") -> None:
if use_conda:
install_condaforge_python(host, python_version)
print('Configuring the system')
if not host.using_docker():
update_apt_repo(host)
host.run_cmd("sudo apt-get install -y ninja-build g++ git cmake gfortran unzip")
else:
host.run_cmd("yum install -y sudo")
host.run_cmd("conda install -y ninja scons")
if not use_conda:
host.run_cmd("sudo apt-get install -y python3-dev python3-yaml python3-setuptools python3-wheel python3-pip")
host.run_cmd("pip3 install dataclasses typing-extensions")
# Install and switch to gcc-8 on Ubuntu-18.04
if not host.using_docker() and host.ami == ubuntu18_04_ami and compiler == 'gcc-8':
host.run_cmd("sudo apt-get install -y g++-8 gfortran-8")
host.run_cmd("sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100")
host.run_cmd("sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 100")
host.run_cmd("sudo update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-8 100")
if not use_conda:
print("Installing Cython + numpy from PyPy")
host.run_cmd("sudo pip3 install Cython")
host.run_cmd("sudo pip3 install numpy")
def build_domains(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> Tuple[str, str, str, str]:
vision_wheel_name = build_torchvision(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
audio_wheel_name = build_torchaudio(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
data_wheel_name = build_torchdata(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
text_wheel_name = build_torchtext(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
return (vision_wheel_name, audio_wheel_name, data_wheel_name, text_wheel_name)
def start_build(host: RemoteHost, *,
branch: str = "master",
compiler: str = "gcc-8",
use_conda: bool = True,
python_version: str = "3.8",
pytorch_only: bool = False,
pytorch_build_number: Optional[str] = None,
shallow_clone: bool = True,
enable_mkldnn: bool = False) -> Tuple[str, str, str, str, str]:
git_clone_flags = " --depth 1 --shallow-submodules" if shallow_clone else ""
if host.using_docker() and not use_conda:
print("Auto-selecting conda option for docker images")
use_conda = True
if not host.using_docker():
print("Disable mkldnn for host builds")
enable_mkldnn = False
configure_system(host,
compiler=compiler,
use_conda=use_conda,
python_version=python_version)
build_OpenBLAS(host, git_clone_flags)
if host.using_docker():
print("Move libgfortant.a into a standard location")
# HACK: pypa gforntran.a is compiled without PIC, which leads to the following error
# libgfortran.a(error.o)(.text._gfortrani_st_printf+0x34): unresolvable R_AARCH64_ADR_PREL_PG_HI21 relocation against symbol `__stack_chk_guard@@GLIBC_2.17'
# Workaround by copying gfortran library from the host
host.run_ssh_cmd("sudo apt-get install -y gfortran-8")
host.run_cmd("mkdir -p /usr/lib/gcc/aarch64-linux-gnu/8")
host.run_ssh_cmd(["docker", "cp", "/usr/lib/gcc/aarch64-linux-gnu/8/libgfortran.a",
f"{host.container_id}:/opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/"
])
print('Checking out PyTorch repo')
host.run_cmd(f"git clone --recurse-submodules -b {branch} https://github.com/pytorch/pytorch {git_clone_flags}")
print('Building PyTorch wheel')
build_opts = ""
if pytorch_build_number is not None:
build_opts += f" --build-number {pytorch_build_number}"
# Breakpad build fails on aarch64
build_vars = "USE_BREAKPAD=0 "
if branch == 'nightly':
build_date = host.check_output("cd pytorch && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
version = host.check_output("cat pytorch/version.txt").strip()[:-2]
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1"
if branch.startswith("v1.") or branch.startswith("v2."):
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
if enable_mkldnn:
build_ArmComputeLibrary(host, git_clone_flags)
print("build pytorch with mkldnn+acl backend")
build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON"
host.run_cmd(f"cd pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && {build_vars} python3 setup.py bdist_wheel{build_opts}")
print('Repair the wheel')
pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
host.run_cmd(f"export LD_LIBRARY_PATH=$HOME/acl/build:$HOME/pytorch/build/lib && auditwheel repair $HOME/pytorch/dist/{pytorch_wheel_name}")
print('replace the original wheel with the repaired one')
pytorch_repaired_wheel_name = host.list_dir("wheelhouse")[0]
host.run_cmd(f"cp $HOME/wheelhouse/{pytorch_repaired_wheel_name} $HOME/pytorch/dist/{pytorch_wheel_name}")
else:
print("build pytorch without mkldnn backend")
host.run_cmd(f"cd pytorch && {build_vars} python3 setup.py bdist_wheel{build_opts}")
print("Deleting build folder")
host.run_cmd("cd pytorch && rm -rf build")
pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
embed_libgomp(host, use_conda, os.path.join('pytorch', 'dist', pytorch_wheel_name))
print('Copying the wheel')
host.download_wheel(os.path.join('pytorch', 'dist', pytorch_wheel_name))
print('Installing PyTorch wheel')
host.run_cmd(f"pip3 install pytorch/dist/{pytorch_wheel_name}")
if pytorch_only:
return (pytorch_wheel_name, None, None, None, None)
domain_wheels = build_domains(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
return (pytorch_wheel_name, *domain_wheels)
embed_library_script = """
#!/usr/bin/env python3
from auditwheel.patcher import Patchelf
from auditwheel.wheeltools import InWheelCtx
from auditwheel.elfutils import elf_file_filter
from auditwheel.repair import copylib
from auditwheel.lddtree import lddtree
from subprocess import check_call
import os
import shutil
import sys
from tempfile import TemporaryDirectory
def replace_tag(filename):
with open(filename, 'r') as f:
lines = f.read().split("\\n")
for i,line in enumerate(lines):
if not line.startswith("Tag: "):
continue
lines[i] = line.replace("-linux_", "-manylinux2014_")
print(f'Updated tag from {line} to {lines[i]}')
with open(filename, 'w') as f:
f.write("\\n".join(lines))
class AlignedPatchelf(Patchelf):
def set_soname(self, file_name: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--set-soname', new_soname, file_name])
def replace_needed(self, file_name: str, soname: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--replace-needed', soname, new_soname, file_name])
def embed_library(whl_path, lib_soname, update_tag=False):
patcher = AlignedPatchelf()
out_dir = TemporaryDirectory()
whl_name = os.path.basename(whl_path)
tmp_whl_name = os.path.join(out_dir.name, whl_name)
with InWheelCtx(whl_path) as ctx:
torchlib_path = os.path.join(ctx._tmpdir.name, 'torch', 'lib')
ctx.out_wheel=tmp_whl_name
new_lib_path, new_lib_soname = None, None
for filename, elf in elf_file_filter(ctx.iter_files()):
if not filename.startswith('torch/lib'):
continue
libtree = lddtree(filename)
if lib_soname not in libtree['needed']:
continue
lib_path = libtree['libs'][lib_soname]['path']
if lib_path is None:
print(f"Can't embed {lib_soname} as it could not be found")
break
if lib_path.startswith(torchlib_path):
continue
if new_lib_path is None:
new_lib_soname, new_lib_path = copylib(lib_path, torchlib_path, patcher)
patcher.replace_needed(filename, lib_soname, new_lib_soname)
print(f'Replacing {lib_soname} with {new_lib_soname} for {filename}')
if update_tag:
# Add manylinux2014 tag
for filename in ctx.iter_files():
if os.path.basename(filename) != 'WHEEL':
continue
replace_tag(filename)
shutil.move(tmp_whl_name, whl_path)
if __name__ == '__main__':
embed_library(sys.argv[1], 'libgomp.so.1', len(sys.argv) > 2 and sys.argv[2] == '--update-tag')
"""
def run_tests(host: RemoteHost, whl: str, branch='master') -> None:
print('Configuring the system')
update_apt_repo(host)
host.run_cmd("sudo apt-get install -y python3-pip git")
host.run_cmd("sudo pip3 install Cython")
host.run_cmd("sudo pip3 install numpy")
host.upload_file(whl, ".")
host.run_cmd(f"sudo pip3 install {whl}")
host.run_cmd("python3 -c 'import torch;print(torch.rand((3,3))'")
host.run_cmd(f"git clone -b {branch} https://github.com/pytorch/pytorch")
host.run_cmd("cd pytorch/test; python3 test_torch.py -v")
def get_instance_name(instance) -> Optional[str]:
if instance.tags is None:
return None
for tag in instance.tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def list_instances(instance_type: str) -> None:
print(f"All instances of type {instance_type}")
for instance in ec2_instances_of_type(instance_type):
print(f"{instance.id} {get_instance_name(instance)} {instance.public_dns_name} {instance.state['Name']}")
def terminate_instances(instance_type: str) -> None:
print(f"Terminating all instances of type {instance_type}")
instances = list(ec2_instances_of_type(instance_type))
for instance in instances:
print(f"Terminating {instance.id}")
instance.terminate()
print("Waiting for termination to complete")
for instance in instances:
instance.wait_until_terminated()
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser("Builid and test AARCH64 wheels using EC2")
parser.add_argument("--key-name", type=str)
parser.add_argument("--debug", action="store_true")
parser.add_argument("--build-only", action="store_true")
parser.add_argument("--test-only", type=str)
parser.add_argument("--os", type=str, choices=list(os_amis.keys()), default='ubuntu20_04')
parser.add_argument("--python-version", type=str, choices=['3.6', '3.7', '3.8', '3.9', '3.10', '3.11'], default=None)
parser.add_argument("--alloc-instance", action="store_true")
parser.add_argument("--list-instances", action="store_true")
parser.add_argument("--pytorch-only", action="store_true")
parser.add_argument("--keep-running", action="store_true")
parser.add_argument("--terminate-instances", action="store_true")
parser.add_argument("--instance-type", type=str, default="t4g.2xlarge")
parser.add_argument("--branch", type=str, default="master")
parser.add_argument("--use-docker", action="store_true")
parser.add_argument("--compiler", type=str, choices=['gcc-7', 'gcc-8', 'gcc-9', 'clang'], default="gcc-8")
parser.add_argument("--use-torch-from-pypi", action="store_true")
parser.add_argument("--pytorch-build-number", type=str, default=None)
parser.add_argument("--disable-mkldnn", action="store_true")
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
ami = os_amis[args.os]
keyfile_path, key_name = compute_keyfile_path(args.key_name)
if args.list_instances:
list_instances(args.instance_type)
sys.exit(0)
if args.terminate_instances:
terminate_instances(args.instance_type)
sys.exit(0)
if len(key_name) == 0:
raise Exception("""
Cannot start build without key_name, please specify
--key-name argument or AWS_KEY_NAME environment variable.""")
if len(keyfile_path) == 0 or not os.path.exists(keyfile_path):
raise Exception(f"""
Cannot find keyfile with name: [{key_name}] in path: [{keyfile_path}], please
check `~/.ssh/` folder or manually set SSH_KEY_PATH environment variable.""")
# Starting the instance
inst = start_instance(key_name, ami=ami, instance_type=args.instance_type)
instance_name = f'{args.key_name}-{args.os}'
if args.python_version is not None:
instance_name += f'-py{args.python_version}'
inst.create_tags(DryRun=False, Tags=[{
'Key': 'Name',
'Value': instance_name,
}])
addr = inst.public_dns_name
wait_for_connection(addr, 22)
host = RemoteHost(addr, keyfile_path)
host.ami = ami
if args.use_docker:
update_apt_repo(host)
host.start_docker()
if args.test_only:
run_tests(host, args.test_only)
sys.exit(0)
if args.alloc_instance:
if args.python_version is None:
sys.exit(0)
install_condaforge_python(host, args.python_version)
sys.exit(0)
python_version = args.python_version if args.python_version is not None else '3.8'
if args.use_torch_from_pypi:
configure_system(host,
compiler=args.compiler,
python_version=python_version)
print("Installing PyTorch wheel")
host.run_cmd("pip3 install torch")
build_domains(host,
branch=args.branch,
git_clone_flags=" --depth 1 --shallow-submodules")
else:
start_build(host,
branch=args.branch,
compiler=args.compiler,
python_version=python_version,
pytorch_only=args.pytorch_only,
pytorch_build_number=args.pytorch_build_number,
enable_mkldnn=not args.disable_mkldnn)
if not args.keep_running:
print(f'Waiting for instance {inst.id} to terminate')
inst.terminate()
inst.wait_until_terminated()
|
#!/usr/bin/env python3
# encoding: UTF-8
import os
import subprocess
from pygit2 import Repository
from typing import List
def list_dir(path: str) -> List[str]:
'''
Helper that lists the contents of a directory
'''
return subprocess.check_output(["ls", "-1", path]).decode().split("\n")
def build_ArmComputeLibrary(git_clone_flags: str = "") -> None:
'''
Build Arm Compute Library for aarch64 PyTorch builds
'''
print('Building Arm Compute Library')
os.system("cd / && mkdir /acl")
os.system(f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v23.05.1 {git_clone_flags}")
os.system('sed -i -e \'s/"armv8.2-a"/"armv8-a"/g\' ComputeLibrary/SConscript; '
'sed -i -e \'s/-march=armv8.2-a+fp16/-march=armv8-a/g\' ComputeLibrary/SConstruct; '
'sed -i -e \'s/"-march=armv8.2-a"/"-march=armv8-a"/g\' ComputeLibrary/filedefs.json')
os.system("cd ComputeLibrary; export acl_install_dir=/acl; "
"scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux openmp=1 cppthreads=0 arch=armv8.2-a multi_isa=1 build=native build_dir=$acl_install_dir/build; "
"cp -r arm_compute $acl_install_dir; "
"cp -r include $acl_install_dir; "
"cp -r utils $acl_install_dir; "
"cp -r support $acl_install_dir; "
"cp -r src $acl_install_dir; cd /")
def complete_wheel(folder: str):
'''
Complete wheel build and put in artifact location
'''
wheel_name = list_dir(f"/{folder}/dist")[0]
if "pytorch" in folder:
print("Repairing Wheel with AuditWheel")
os.system(f"cd /{folder}; auditwheel repair dist/{wheel_name}")
repaired_wheel_name = list_dir(f"/{folder}/wheelhouse")[0]
print(f"Moving {repaired_wheel_name} wheel to /{folder}/dist")
os.system(f"mv /{folder}/wheelhouse/{repaired_wheel_name} /{folder}/dist/")
else:
repaired_wheel_name = wheel_name
print(f"Copying {repaired_wheel_name} to artfacts")
os.system(f"mv /{folder}/dist/{repaired_wheel_name} /artifacts/")
return repaired_wheel_name
def parse_arguments():
'''
Parse command line arguments
'''
from argparse import ArgumentParser
parser = ArgumentParser("AARCH64 wheels python CD")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--build-only", action="store_true")
parser.add_argument("--test-only", type=str)
parser.add_argument("--enable-mkldnn", action="store_true")
return parser.parse_args()
if __name__ == '__main__':
'''
Entry Point
'''
args = parse_arguments()
enable_mkldnn = args.enable_mkldnn
repo = Repository('/pytorch')
branch = repo.head.name
if branch == 'HEAD':
branch = 'master'
git_clone_flags = " --depth 1 --shallow-submodules"
print('Building PyTorch wheel')
build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
os.system("python setup.py clean")
override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
if override_package_version is not None:
version = override_package_version
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version} PYTORCH_BUILD_NUMBER=1 "
else:
if branch == 'nightly' or branch == 'master':
build_date = subprocess.check_output(['git', 'log', '--pretty=format:%cs', '-1'], cwd='/pytorch').decode().replace('-', '')
version = subprocess.check_output(['cat', 'version.txt'], cwd='/pytorch').decode().strip()[:-2]
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1 "
if branch.startswith("v1.") or branch.startswith("v2."):
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
if enable_mkldnn:
build_ArmComputeLibrary(git_clone_flags)
print("build pytorch with mkldnn+acl backend")
build_vars += "USE_MKLDNN=ON USE_MKLDNN_ACL=ON " \
"ACL_ROOT_DIR=/acl " \
"LD_LIBRARY_PATH=/pytorch/build/lib:/acl/build:$LD_LIBRARY_PATH " \
"ACL_INCLUDE_DIR=/acl/build " \
"ACL_LIBRARY=/acl/build "
else:
print("build pytorch without mkldnn backend")
os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel")
pytorch_wheel_name = complete_wheel("pytorch")
print(f"Build Compelete. Created {pytorch_wheel_name}..")
|
#!/usr/bin/env python3
from auditwheel.patcher import Patchelf
from auditwheel.wheeltools import InWheelCtx
from auditwheel.elfutils import elf_file_filter
from auditwheel.repair import copylib
from auditwheel.lddtree import lddtree
from subprocess import check_call
import os
import shutil
import sys
from tempfile import TemporaryDirectory
def replace_tag(filename):
with open(filename, 'r') as f:
lines = f.read().split("\\n")
for i,line in enumerate(lines):
if not line.startswith("Tag: "):
continue
lines[i] = line.replace("-linux_", "-manylinux2014_")
print(f'Updated tag from {line} to {lines[i]}')
with open(filename, 'w') as f:
f.write("\\n".join(lines))
class AlignedPatchelf(Patchelf):
def set_soname(self, file_name: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--set-soname', new_soname, file_name])
def replace_needed(self, file_name: str, soname: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--replace-needed', soname, new_soname, file_name])
def embed_library(whl_path, lib_soname, update_tag=False):
patcher = AlignedPatchelf()
out_dir = TemporaryDirectory()
whl_name = os.path.basename(whl_path)
tmp_whl_name = os.path.join(out_dir.name, whl_name)
with InWheelCtx(whl_path) as ctx:
torchlib_path = os.path.join(ctx._tmpdir.name, 'torch', 'lib')
ctx.out_wheel=tmp_whl_name
new_lib_path, new_lib_soname = None, None
for filename, elf in elf_file_filter(ctx.iter_files()):
if not filename.startswith('torch/lib'):
continue
libtree = lddtree(filename)
if lib_soname not in libtree['needed']:
continue
lib_path = libtree['libs'][lib_soname]['path']
if lib_path is None:
print(f"Can't embed {lib_soname} as it could not be found")
break
if lib_path.startswith(torchlib_path):
continue
if new_lib_path is None:
new_lib_soname, new_lib_path = copylib(lib_path, torchlib_path, patcher)
patcher.replace_needed(filename, lib_soname, new_lib_soname)
print(f'Replacing {lib_soname} with {new_lib_soname} for {filename}')
if update_tag:
# Add manylinux2014 tag
for filename in ctx.iter_files():
if os.path.basename(filename) != 'WHEEL':
continue
replace_tag(filename)
shutil.move(tmp_whl_name, whl_path)
if __name__ == '__main__':
embed_library(sys.argv[1], 'libgomp.so.1', len(sys.argv) > 2 and sys.argv[2] == '--update-tag')
|
from conda.cli.python_api import Commands, run_command
from tabulate import tabulate
from datetime import datetime
import json
PLATFORMS = ["osx-64", "linux-64", "win-64"]
PYTHON_VERSIONS = ["3.10", "3.9", "3.8", "3.7"]
CUDA_CUDNN_VERSION = [
("11.7", "8.5.0"), ("cpu", None)
]
CHANNEL = "pytorch-test"
VERSION = "1.13.*"
def generate_expected_builds(platform: str) -> set:
builds = set()
for py_version in PYTHON_VERSIONS:
if platform == "osx-64":
# macos builds support cpu only.
builds.add(f"py{py_version}_0")
continue
for cuda_version, cudnn_version in CUDA_CUDNN_VERSION:
if platform == "win-64":
cudnn_version = "8"
if cuda_version == "cpu":
builds.add(f"py{py_version}_{cuda_version}_0")
else:
builds.add(f"py{py_version}_cuda{cuda_version}_cudnn{cudnn_version}_0")
return builds
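# For linux-64 this yields build strings such as "py3.9_cuda11.7_cudnn8.5.0_0" and "py3.9_cpu_0";
# osx-64 only gets "py<version>_0", and win-64 pins the cudnn component to "8".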
def size_format(size_num) -> str:
for unit in ["", "K", "M", "G"]:
if abs(size_num) < 1024.0:
return f"{size_num:3.1f}{unit}B"
size_num /= 1024.0
return f"{size_num:3.1f}TB"
def main() -> None:
# Iterate over platforms to gather build information for the available conda versions.
for platform in PLATFORMS:
expected_builds = generate_expected_builds(platform)
# Actual builds available in Conda
stdout, stderr, return_code = run_command(
Commands.SEARCH, f"{CHANNEL}::*[name=pytorch version={VERSION} subdir={platform}]", "--json")
if return_code != 0:
raise Exception(stderr)
available_versions = json.loads(stdout)
output_data = []
headers = ["File Name", "Date", "Size"]
actual_builds = set()
for version in available_versions["pytorch"]:
actual_builds.add(version["build"])
output_data.append((
version["fn"],
datetime.fromtimestamp(version["timestamp"] / 1000),
size_format(version["size"])
))
assert len(expected_builds) > 0, "expected builds set should not be empty."
assert expected_builds == actual_builds, (
f"Missing following builds in conda: {expected_builds.difference(actual_builds)} for platform {platform}"
)
print(f"\nSuccessfully verified following binaries are available in Conda for {platform}...")
print(tabulate(output_data, headers=headers, tablefmt="grid"))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3.7
from datetime import datetime, time
import json
import requests
import itertools
import sqlite3
import os
import sys
from typing import Callable, Dict, Generator, List, MutableSet, Optional
def get_executor_price_rate(executor):
(etype, eclass) = executor['type'], executor['resource_class']
assert etype in ['machine', 'external', 'docker', 'macos', 'runner'], f'Unexpected type {etype}:{eclass}'
if etype == 'machine':
return {
'medium': 10,
'large': 20,
'xlarge': 100,
'2xlarge': 200,
'gpu.medium': 160,
'gpu.large': 320,
'gpu.small': 80,
'windows.medium': 40,
'windows.large': 120,
'windows.xlarge': 210,
'windows.2xlarge': 500,
'windows.gpu.nvidia.medium': 500,
'gpu.nvidia.small': 160,
'gpu.nvidia.medium': 240,
'gpu.nvidia.large': 1000,
}[eclass]
if etype == 'macos':
return {
'medium': 50,
'large': 100,
}[eclass]
if etype == 'docker':
return {
'small': 5,
'medium': 10,
'medium+': 15,
'large': 20,
'xlarge': 40,
'2xlarge': 80,
'2xlarge+': 100,
}[eclass]
if etype == 'runner' or etype == 'external':
return {
'pytorch/amd-gpu': 0,
}[eclass]
raise RuntimeError(f'Undefined executor {etype}:{eclass}')
price_per_credit = 6e-4
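# The rates above are treated as CircleCI credits per minute (they are multiplied by the job
# duration in minutes below); at $0.0006 per credit, a 60-minute job on a 'large' machine
# executor (20 credits/min) costs 20 * 60 * 6e-4 = $0.72.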
def get_circleci_token() -> str:
token_file_path = os.path.join(os.getenv('HOME'), '.circleci_token')
token = os.getenv('CIRCLECI_TOKEN')
if token is not None:
return token
if not os.path.exists(token_file_path):
raise RuntimeError('Cannot get CircleCI token'
' either from the CIRCLECI_TOKEN environment variable'
' or from the ~/.circleci_token file')
with open(token_file_path) as f:
return f.read().strip()
def is_workflow_in_progress(workflow: Dict) -> bool:
return workflow['status'] in ['running', 'not_run', 'failing', 'on_hold']
def str2date(val: str) -> datetime:
assert val is not None
return datetime.fromisoformat(val[:-1] if val.endswith('Z') else val)
class CircleCICache:
def __init__(self, token: Optional[str], db_name: str = 'circleci-cache.db') -> None:
file_folder = os.path.dirname(__file__)
self.url_prefix = 'https://circleci.com/api/v2'
self.session = requests.session()
self.headers = {
'Accept': 'application/json',
'Circle-Token': token,
} if token is not None else None
self.db = sqlite3.connect(os.path.join(file_folder, db_name))
self.db.execute('CREATE TABLE IF NOT EXISTS jobs(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS artifacts(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE UNIQUE INDEX IF NOT EXISTS jobs_key on jobs(slug, job_id);')
self.db.execute('CREATE TABLE IF NOT EXISTS workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipeline_workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipelines(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL, branch TEXT, revision TEXT);')
self.db.commit()
def is_offline(self) -> bool:
return self.headers is None
def _get_paged_items_list(self, url: str, params: Optional[Dict] = None, item_count: Optional[int] = -1) -> List:
rc, token, run_once = [], None, False
def _should_quit():
nonlocal run_once, rc, token
if not run_once:
run_once = True
return False
if token is None:
return True
if item_count is None:
return True
return item_count >= 0 and len(rc) >= item_count
if params is None:
params = {}
while not _should_quit():
if token is not None:
params['page-token'] = token
r = self.session.get(url, params=params, headers=self.headers)
try:
j = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
if 'message' in j:
raise RuntimeError(f'Failed to get list from {url}: {j["message"]}')
token = j['next_page_token']
rc.extend(j['items'])
return rc
def get_pipelines(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> List:
if self.is_offline():
c = self.db.cursor()
cmd = "SELECT json from pipelines"
if branch is not None:
cmd += f" WHERE branch='{branch}'"
if item_count is not None and item_count > 0:
cmd += f" LIMIT {item_count}"
c.execute(cmd)
return [json.loads(val[0]) for val in c.fetchall()]
rc = self._get_paged_items_list(f'{self.url_prefix}/project/{project}/pipeline', {'branch': branch} if branch is not None else {}, item_count)
for pipeline in rc:
vcs = pipeline['vcs']
pid, branch, revision, pser = pipeline['id'], vcs['branch'], vcs['revision'], json.dumps(pipeline)
self.db.execute("INSERT OR REPLACE INTO pipelines(id, branch, revision, json) VALUES (?, ?, ?, ?)", (pid, branch, revision, pser))
self.db.commit()
return rc
def get_pipeline_workflows(self, pipeline) -> List:
c = self.db.cursor()
c.execute("SELECT json FROM pipeline_workflows WHERE id=?", (pipeline,))
rc = c.fetchone()
if rc is not None:
rc = json.loads(rc[0])
if not any(is_workflow_in_progress(w) for w in rc) or self.is_offline():
return rc
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/pipeline/{pipeline}/workflow')
self.db.execute("INSERT OR REPLACE INTO pipeline_workflows(id, json) VALUES (?, ?)", (pipeline, json.dumps(rc)))
self.db.commit()
return rc
def get_workflow_jobs(self, workflow, should_cache=True) -> List:
c = self.db.cursor()
c.execute("select json from workflows where id=?", (workflow,))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/workflow/{workflow}/job')
if should_cache:
self.db.execute("INSERT INTO workflows(id, json) VALUES (?, ?)", (workflow, json.dumps(rc)))
self.db.commit()
return rc
def get_job(self, project_slug, job_number) -> Dict:
c = self.db.cursor()
c.execute("select json from jobs where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return {}
r = self.session.get(f'{self.url_prefix}/project/{project_slug}/job/{job_number}', headers=self.headers)
try:
rc = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
self.db.execute("INSERT INTO jobs(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_job_artifacts(self, project_slug, job_number) -> List[Dict]:
c = self.db.cursor()
c.execute("select json from artifacts where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return [{}]
rc = self._get_paged_items_list(f"{self.url_prefix}/project/{project_slug}/{job_number}/artifacts")
self.db.execute("INSERT INTO artifacts(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_pipeline_jobs(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> Generator:
for pipeline in self.get_pipelines(project, branch, item_count):
for workflow in self.get_pipeline_workflows(pipeline['id']):
in_progress = is_workflow_in_progress(workflow)
for job in self.get_workflow_jobs(workflow['id'], should_cache=not in_progress):
yield (pipeline, workflow, job)
def get_jobs_summary(self, slug='gh/pytorch/pytorch', workflow='build') -> Dict:
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/{workflow}/jobs')
return {item['name']: item for item in items}
def get_job_timeseries(self, job_name: str,
slug: str = 'gh/pytorch/pytorch',
workflow: str = 'build',
branch: Optional[str] = None) -> List:
params = {'branch': branch} if branch is not None else {}
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/build/jobs/{job_name}', params)
return [(str2date(x['started_at']), x['duration']) for x in items if x['status'] == 'success']
def aggregate_by_day(series):
rc = {}
for (ts, val) in series:
date = datetime.combine(ts.date(), time())
valcount = [val, 1.0]
if date not in rc:
rc[date] = valcount
else:
rc[date] = [sum(x) for x in zip(rc[date], valcount)]
return [(x, rc[x][0] / rc[x][1]) for x in sorted(rc.keys())]
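# e.g. aggregate_by_day([(t1, 100.0), (t2, 200.0)]) with t1 and t2 on the same calendar day
# returns [(that_day_at_midnight, 150.0)], i.e. the per-day average of the values.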
def filter_names(names: List[str], name_filter: Optional[str] = None) -> List[str]:
import re
if name_filter is None:
return names
filters = name_filter.split(",")
return [name for name in names if any(re.match(filter, name) for filter in filters)]
def common_prefix(names: List[str]) -> str:
if len(names) == 0 or len(names[0]) == 0:
return ''
if len(names) == 1:
return names[0]
rc = names[0][0]
while rc != names[0] and all(name.startswith(rc) for name in names[1:]):
rc = names[0][:len(rc) + 1]
return rc[:-1]
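# e.g. common_prefix(["pytorch_linux_test1", "pytorch_linux_test2"]) == "pytorch_linux_test"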
def plot_graph(name_filter: Optional[str] = None,
output_file: Optional[str] = None,
branch: Optional[str] = None) -> None:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
ci_cache = CircleCICache(token=get_circleci_token())
summary = ci_cache.get_jobs_summary()
test_jobs = [name for name in summary.keys() if name.startswith('pytorch') and 'test' in name]
filtered_jobs = filter_names(test_jobs, name_filter)
prefix = common_prefix(filtered_jobs)
if len(filtered_jobs) == 0:
print(f'Filter "{name_filter}" does not match any of {test_jobs}')
return
series = []
labels = []
styles = [f'{color}{style}' for (style, color) in itertools.product(['-', '--', '-.', ':'], ['b', 'g', 'r', 'c', 'm', 'y', 'k'])]
fig, ax = plt.subplots()
for name in test_jobs:
label = f"{name}(p95 = {int(summary[name]['metrics']['duration_metrics']['p95']/60)} min)"
if name not in filtered_jobs:
print(label)
continue
ts = ci_cache.get_job_timeseries(name, branch=branch)
if len(ts) == 0:
print(f'{label} time series is empty!')
continue
print(f'{label} time series has {len(ts)} elements')
labels.append(label[len(prefix):])
series.append(ts)
x, y = zip(*aggregate_by_day(ts))
plt.plot(x, [i / 60.0 for i in y], styles[len(labels) % len(styles)])
plt.legend(labels, loc='upper left')
plt.title(f'{prefix} timeseries')
ax.set_ylabel("Duration (m)")
# Format date
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
if output_file is not None:
plt.savefig(output_file)
else:
plt.show()
def print_line(line: str, padding: Optional[int] = None, newline: bool = True) -> None:
if padding is not None and len(line) < padding:
line += ' ' * (padding - len(line))
print(line, end='\n' if newline else '\r', flush=True)
def fetch_status(branch=None, item_count=50):
isatty = sys.stdout.isatty()
padding = os.get_terminal_size().columns - 1 if isatty else None
ci_cache = CircleCICache(token=get_circleci_token())
print(f"About to fetch {item_count} latest pipelines against {branch if branch is not None else 'all branches'}")
pipelines = ci_cache.get_pipelines(branch=branch, item_count=item_count)
total_price, total_master_price = 0, 0
for pipeline_idx, pipeline in enumerate(pipelines):
revision = pipeline['vcs']['revision']
branch = pipeline['vcs']['branch']
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
known_job_ids = []
for workflow in workflows:
url = f'https://app.circleci.com/pipelines/github/pytorch/pytorch/{workflow["pipeline_number"]}/workflows/{workflow["id"]}'
if is_workflow_in_progress(workflow):
print_line(f'Skipping {url} name:{workflow["name"]} status:{workflow["status"]}',
newline=not sys.stdout.isatty())
continue
rerun = False
total_credits, test_credits, gpu_credits, wincpu_credits, wingpu_credits = 0, 0, 0, 0, 0
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name, job_status, job_number = job['name'], job['status'], job.get('job_number', None)
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
if job_number is None:
print(job)
continue
if job_number in known_job_ids:
rerun = True
continue
job_info = ci_cache.get_job(job['project_slug'], job_number)
if 'executor' not in job_info:
print(f'executor not found in {job_info}')
continue
job_executor = job_info['executor']
resource_class = job_executor['resource_class']
if resource_class is None:
print(f'resource_class is none for {job_info}')
continue
job_on_gpu = 'gpu' in resource_class
job_on_win = 'windows' in resource_class
if job_status != 'infrastructure_fail':
duration = str2date(job_info['stopped_at']) - str2date(job_info['started_at'])
job_credits = get_executor_price_rate(job_executor) * int(job_info['duration']) * 1e-3 / 60
else:
job_credits, duration = 0, 0
job_cost = job_credits * price_per_credit
total_credits += job_credits
if 'test' in job_name or job_name.startswith('smoke_'):
test_credits += job_credits
elif job_on_gpu:
print(f'Running build job {job_name} on GPU!!!')
if job_on_gpu:
gpu_credits += job_credits
if job_on_win:
wingpu_credits += job_credits
if job_on_win and not job_on_gpu:
wincpu_credits += job_credits
known_job_ids.append(job_number)
print_line(f' {job_name} {job_status} {duration} ${job_cost:.2f}',
padding=padding, newline=not isatty)
# Increment totals
total_price += total_credits * price_per_credit
if branch in ['master', 'nightly', 'postnightly', 'release/1.6']:
total_master_price += total_credits * price_per_credit
# skip small jobs
if total_credits * price_per_credit < .1:
continue
workflow_status = f'[{pipeline_idx}/{len(pipelines)}]'
workflow_status += f' {url} {workflow["name"]} status:{workflow["status"]}'
workflow_status += f' price: ${total_credits * price_per_credit:.2f}'
workflow_status += ' (Rerun?)' if rerun else ''
workflow_status += f'\n\t\tdate: {workflow["created_at"]} branch:{branch} revision:{revision}'
workflow_status += f'\n\t\ttotal credits: {int(total_credits)}'
if test_credits != 0:
workflow_status += f' testing: {100 * test_credits / total_credits:.1f}%'
if gpu_credits != 0:
workflow_status += f' GPU testing: {100 * gpu_credits / total_credits:.1f}%'
if wingpu_credits != 0:
workflow_status += f' WINGPU/GPU: {100 * wingpu_credits / gpu_credits:.1f}%'
if wincpu_credits != 0:
workflow_status += f' Win CPU: {100 * wincpu_credits / total_credits:.1f}%'
workflow_status += f' Total: ${total_price:.2f} master fraction: {100 * total_master_price/ total_price:.1f}%'
print_line(workflow_status, padding=padding)
def plot_heatmap(cov_matrix, names):
import numpy as np
import matplotlib.pyplot as plt
assert cov_matrix.shape == (len(names), len(names))
fig, ax = plt.subplots()
ax.imshow(cov_matrix)
ax.set_xticks(np.arange(len(names)))
ax.set_yticks(np.arange(len(names)))
ax.set_xticklabels(names)
ax.set_yticklabels(names)
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
# Annotate values
for i in range(len(names)):
for j in range(len(names)):
ax.text(j, i, f'{cov_matrix[i, j]:.2f}', ha='center', va='center', color='w')
plt.show()
def filter_service_jobs(name):
if name.startswith('docker'):
return True
if name.startswith('binary'):
return True
return False
def filter_cuda_test(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
if 'test' not in name:
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
if 'cuda11' in name:
return False
# Skip VS2017 tests
if 'vs2017' in name:
return False
return 'cuda' in name and 'nogpu' not in name
def filter_cuda_build(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
return 'cuda' in name and name.endswith('build')
def filter_windows_test(name):
if filter_service_jobs(name):
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
return 'test' in name and 'windows' in name
def compute_covariance(branch='master', name_filter: Optional[Callable[[str], bool]] = None):
import numpy as np
revisions: MutableSet[str] = set()
job_summary: Dict[str, Dict[str, float]] = {}
# Extract data
print(f"Computing covariance for {branch if branch is not None else 'all branches'}")
ci_cache = CircleCICache(None)
pipelines = ci_cache.get_pipelines(branch=branch)
for pipeline in pipelines:
if pipeline['trigger']['type'] == 'schedule':
continue
revision = pipeline['vcs']['revision']
pipeline_jobs: Dict[str, float] = {}
blocked_jobs: MutableSet[str] = set()
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
for workflow in workflows:
if is_workflow_in_progress(workflow):
continue
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name = job['name']
job_status = job['status']
# Handle renames
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX2_test'
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX_test'
if job_status in ['infrastructure_fail', 'canceled']:
continue
if callable(name_filter) and not name_filter(job_name):
continue
if job_status == 'blocked':
blocked_jobs.add(job_name)
continue
if job_name in blocked_jobs:
blocked_jobs.remove(job_name)
result = 1.0 if job_status == 'success' else -1.0
pipeline_jobs[job_name] = result
# Skip build with blocked job [which usually means build failed due to the test failure]
if len(blocked_jobs) != 0:
continue
# Skip all success workflows
if all(result == 1.0 for result in pipeline_jobs.values()):
continue
revisions.add(revision)
for job_name in pipeline_jobs:
if job_name not in job_summary:
job_summary[job_name] = {}
job_summary[job_name][revision] = pipeline_jobs[job_name]
# Analyze results
job_names = sorted(job_summary.keys())
# revisions = sorted(revisions)
job_data = np.zeros((len(job_names), len(revisions)), dtype=float)
print(f"Number of observations: {len(revisions)}")
for job_idx, job_name in enumerate(job_names):
job_row = job_summary[job_name]
for rev_idx, revision in enumerate(revisions):
if revision in job_row:
job_data[job_idx, rev_idx] = job_row[revision]
success_rate = job_data[job_idx, ].sum(where=job_data[job_idx, ] > 0.0) / len(job_row)
present_rate = 1.0 * len(job_row) / len(revisions)
print(f"{job_name}: missing {100.0 * (1.0 - present_rate):.2f}% success rate: {100 * success_rate:.2f}%")
cov_matrix = np.corrcoef(job_data)
plot_heatmap(cov_matrix, job_names)
def print_artifacts(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, _, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
revision = pipeline['vcs']['revision']
if not name_filter(job["name"]):
continue
job_number = job.get("job_number")
if job_number is None:
continue
artifacts = ci_cache.get_job_artifacts('gh/pytorch/pytorch', job_number)
for artifact in artifacts:
name = os.path.basename(artifact['path'])
url = artifact["url"]
print(f"{revision} {name} {url}")
def print_duration(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, workflow, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
job_name, job_status, job_number = job['name'], job['status'], job.get("job_number")
revision = pipeline['vcs']['revision']
if not name_filter(job_name) or job_number is None:
continue
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
started_at = str2date(job['started_at'])
stopped_at = str2date(job['stopped_at'])
duration = stopped_at - started_at
print(f"{job_name} {revision} {duration} {started_at}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Download and analyze circle logs")
parser.add_argument('--plot-graph', type=str, nargs='?', help="Plot job time trends", const='')
parser.add_argument('--output', type=str, help="Output file name for the graphs")
parser.add_argument('--get_artifacts', type=str)
parser.add_argument('--print-duration', type=str)
parser.add_argument('--branch', type=str)
parser.add_argument('--item_count', type=int, default=100)
parser.add_argument('--compute_covariance', choices=['cuda_test', 'cuda_build', 'windows_test'])
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
if args.get_artifacts is not None:
print_artifacts(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.get_artifacts in x)
sys.exit(0)
if args.print_duration is not None:
print_duration(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.print_duration in x)
sys.exit(0)
if args.compute_covariance is not None:
name_filter = {
'cuda_test': filter_cuda_test,
'cuda_build': filter_cuda_build,
'windows_test': filter_windows_test,
}[args.compute_covariance]
compute_covariance(branch=args.branch, name_filter=name_filter)
sys.exit(0)
if args.plot_graph is not None:
plot_graph(args.plot_graph, args.output, args.branch)
sys.exit(0)
fetch_status(branch=args.branch, item_count=args.item_count)
|
#!/usr/bin/env python3
# Tool for analyzing sizes of CUDA kernels for various GPU architectures
import os
import struct
import subprocess
import sys
from tempfile import TemporaryDirectory
from typing import Dict
# Try to auto-import elftools
try:
from elftools.elf.elffile import ELFFile
except ModuleNotFoundError:
print(f'elftools module not found, trying to install it from pip')
from pip._internal import main as pip_main
try:
pip_main(["install", "pyelftools", "--user"])
except SystemExit:
print(f'PIP installation failed, please install it manually by invoking "{sys.executable} -mpip install pyelftools --user"')
sys.exit(-1)
from elftools.elf.elffile import ELFFile
# From https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def compute_cubin_sizes(file_name, section_name='.nv_fatbin', debug=False):
with open(file_name, 'rb') as f:
elf_file = ELFFile(f)
nv_fatbin = elf_file.get_section_by_name(section_name)
if nv_fatbin is None:
return {}
data = nv_fatbin.data()
idx, offs = 0, 0
elf_sizes = {}
while offs < len(data):
(magic, version, header_size, fatbin_size) = struct.unpack('IHHL', data[offs: offs + 16])
if magic != 0xba55ed50 or version != 1:
raise RuntimeError(f"Unexpected fatbin magic {hex(magic)} or version {version}")
if debug:
print(f"Found fatbin at {offs} header_size={header_size} fatbin_size={fatbin_size}")
offs += header_size
fatbin_end = offs + fatbin_size
while offs < fatbin_end:
(kind, version, hdr_size, elf_size, empty, code_ver, sm_ver) = struct.unpack('HHILLIH', data[offs: offs + 30])
if version != 0x0101 or kind not in [1, 2]:
raise RuntimeError(f"Unexpected cubin version {hex(version)} or kind {kind}")
sm_ver = f'{"ptx" if kind == 1 else "sm"}_{sm_ver}'
if debug:
print(f" {idx}: elf_size={elf_size} code_ver={hex(code_ver)} sm={sm_ver}")
if sm_ver not in elf_sizes:
elf_sizes[sm_ver] = 0
elf_sizes[sm_ver] += elf_size
idx, offs = idx + 1, offs + hdr_size + elf_size
offs = fatbin_end
return elf_sizes
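# Illustrative usage (assumes a CUDA build of libtorch_cuda.so is available):
#   sizes = compute_cubin_sizes('libtorch_cuda.so')
#   for sm_ver, size in sorted(sizes.items()):
#       print(f"{sm_ver}: {sizeof_fmt(size)}")
# which would print per-architecture totals such as "sm_80: 512.3MiB" or
# "ptx_86: 128.0MiB" (exact keys and sizes depend on how the library was built).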
class ArFileCtx:
def __init__(self, ar_name: str) -> None:
self.ar_name = os.path.abspath(ar_name)
self._tmpdir = TemporaryDirectory()
def __enter__(self) -> str:
self._pwd = os.getcwd()
        rc = self._tmpdir.__enter__()
        # Extract the archive inside the temporary directory (cwd is restored in __exit__)
        os.chdir(rc)
        subprocess.check_call(['ar', 'x', self.ar_name])
        return rc
def __exit__(self, ex, value, tb) -> None:
os.chdir(self._pwd)
return self._tmpdir.__exit__(ex, value, tb)
def dict_add(rc: Dict[str, int], b: Dict[str, int]) -> Dict[str, int]:
for key, val in b.items():
rc[key] = (rc[key] if key in rc else 0) + val
return rc
def main():
if sys.platform != 'linux':
print('This script only works with Linux ELF files')
return
if len(sys.argv) < 2:
print(f"{sys.argv[0]} invoked without any arguments trying to infer location of libtorch_cuda")
import torch
fname = os.path.join(os.path.dirname(torch.__file__), 'lib', 'libtorch_cuda.so')
else:
fname = sys.argv[1]
if not os.path.exists(fname):
print(f"Can't find {fname}")
sys.exit(-1)
section_names = ['.nv_fatbin', '__nv_relfatbin']
results = {name: {} for name in section_names}
print(f"Analyzing {fname}")
if os.path.splitext(fname)[1] == '.a':
with ArFileCtx(fname):
for fname in os.listdir("."):
if not fname.endswith(".o"): continue
for section_name in section_names:
elf_sizes = compute_cubin_sizes(fname, section_name)
dict_add(results[section_name], elf_sizes)
else:
        for section_name in section_names:
dict_add(results[section_name], compute_cubin_sizes(fname, section_name))
for section_name in section_names:
elf_sizes = results[section_name]
print(f"{section_name} size {sizeof_fmt(sum(elf_sizes.values()))}")
for (sm_ver, total_size) in elf_sizes.items():
print(f" {sm_ver}: {sizeof_fmt(total_size)}")
if __name__ == '__main__':
main()
|
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import gzip
import multiprocessing
import os
import re
import urllib
from tqdm import tqdm
import botocore
import boto3
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('pytorch')
class CacheEntry:
_size = None
def __init__(self, download_uri: str):
self.download_uri = download_uri
self.bytes_sent = 0
@property
def os_type(self) -> str:
os_type = "linux"
if "win" in self.download_uri:
os_type = "windows"
elif "macosx" in self.download_uri:
os_type = "macos"
return os_type
@property
def target_arch(self) -> str:
target_arch = "cpu"
result = re.search(r"cu[0-9]+", self.download_uri)
if result:
target_arch = result[0]
return target_arch
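    # For example (URI shown for illustration only), a download URI such as
    # "/whl/cu113/torch-1.12.0+cu113-cp39-cp39-linux_x86_64.whl" yields
    # target_arch == "cu113", while URIs without a "cuNNN" component fall back to "cpu".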
@property
def package_name(self) -> str:
filename_contents = os.path.basename(self.download_uri).split('-')
return filename_contents[0]
@property
def package_version(self) -> str:
if "dev" in self.download_uri:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+\.dev[0-9]+",
self.download_uri
)
else:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+", self.download_uri
)
if not results:
raise Exception("Wtf there's no version o.O")
return results[0]
@property
def size(self) -> int:
if self._size is None:
for key in BUCKET.objects.filter(
Prefix=self.download_uri.lstrip("/")
):
self._size = key.size
if self._size is None:
raise Exception(
f"No object found for prefix {self.download_uri}"
)
return self._size
@property
def downloads(self):
return self.bytes_sent // self.size
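# Downloads are estimated from transferred bytes: for example, if a wheel object
# is 750 MB on S3 and the access logs attribute 7.5 GB of bytes sent to its URI,
# the entry reports 10 downloads (partial downloads effectively round down).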
def parse_logs(log_directory: str) -> dict:
bytes_cache = dict()
for (dirpath, _, filenames) in os.walk(log_directory):
for filename in tqdm(filenames):
with gzip.open(os.path.join(dirpath, filename), 'r') as gf:
string = gf.read().decode("utf-8")
entries = []
entries += string.splitlines()[2:]
for entry in entries:
columns = entry.split('\t')
bytes_sent = int(columns[3])
download_uri = urllib.parse.unquote(
urllib.parse.unquote(columns[7])
)
status = columns[8]
if not all([
status.startswith("2"),
download_uri.endswith((".whl", ".zip"))
]):
continue
if not bytes_cache.get(download_uri):
bytes_cache[download_uri] = CacheEntry(download_uri)
bytes_cache[download_uri].bytes_sent += bytes_sent
return bytes_cache
def output_results(bytes_cache: dict) -> None:
os_results = defaultdict(int)
arch_results = defaultdict(int)
package_results = defaultdict(lambda: defaultdict(int))
for _, val in tqdm(bytes_cache.items()):
try:
os_results[val.os_type] += val.downloads
arch_results[val.target_arch] += val.downloads
package_results[val.package_name][val.package_version] += (
val.downloads
)
except Exception:
pass
print("=-=-= Results =-=-=")
print("=-=-= OS =-=-=")
total_os_num = sum(os_results.values())
for os_type, num in os_results.items():
print(
f"\t* {os_type}: {num} ({(num/total_os_num)*100:.2f}%)"
)
print("=-=-= ARCH =-=-=")
total_arch_num = sum(arch_results.values())
for arch_type, num in arch_results.items():
print(
f"\t* {arch_type}: {num} ({(num/total_arch_num) * 100:.2f}%)"
)
print("=-=-= By Package =-=-=")
for package_name, upper_val in package_results.items():
print(f"=-=-= {package_name} =-=-=")
total_package_num = sum(upper_val.values())
for package_version, num in upper_val.items():
print(
f"\t* {package_version}: {num} ({(num/total_package_num) * 100:.2f}%)"
)
def download_logs(log_directory: str, since: float):
dt_now = datetime.now(timezone.utc)
dt_end = datetime(dt_now.year, dt_now.month, dt_now.day, tzinfo=timezone.utc)
dt_start = dt_end - timedelta(days=1, hours=1) # Add 1 hour padding to account for potentially missed logs due to timing
for key in tqdm(BUCKET.objects.filter(Prefix='cflogs')):
remote_fname = key.key
local_fname = os.path.join(log_directory, remote_fname)
# Only download things from yesterday
dt_modified = key.last_modified.replace(tzinfo=timezone.utc)
if dt_start >= dt_modified or dt_end < dt_modified:
continue
# TODO: Do this in parallel
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
CLIENT.download_file("pytorch", remote_fname, local_fname)
if __name__ == "__main__":
print("Downloading logs")
download_logs('cache', 1)
print("Parsing logs")
cache = parse_logs('cache/cflogs/')
print("Calculating results")
output_results(cache)
|
import argparse
import boto3
import bz2
import json
import os
import re
import requests
import pandas as pd
from datetime import datetime, timedelta
from tqdm import tqdm
from typing import Any, Dict, Optional, List
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('ossci-metrics')
GITHUB_API_BASE = "https://api.github.com/"
GITHUB_COMMITS_API = "repos/pytorch/pytorch/commits"
STRF_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
CACHE_PICKLE = "cache/test_time/dataframe.pickle"
def _get_latest_git_commit_sha_list(lookback: int):
    sha_since = (datetime.utcnow() - timedelta(hours=lookback)).strftime(STRF_FORMAT)
resp = requests.get(GITHUB_API_BASE + GITHUB_COMMITS_API + f"?since={sha_since}")
if resp.status_code == 200:
return [e.get('sha') for e in resp.json()]
else:
return []
def _json_to_df(data: Dict[str, Any], granularity: str) -> pd.DataFrame:
reformed_data = list()
for fname, fdata in data['files'].items():
if granularity == 'file':
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'file': fname,
'file_total_sec': fdata['total_seconds'],
})
else:
for sname, sdata in fdata['suites'].items():
if granularity == 'suite':
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'suite': sname,
'suite_total_sec': sdata['total_seconds'],
})
else:
for cname, cdata in sdata['cases'].items():
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'case': cname,
'case_status': cdata['status'],
'case_sec': cdata['seconds'],
})
df = pd.json_normalize(reformed_data)
return df
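# Depending on `granularity`, each resulting row carries either per-file totals
# (file, file_total_sec), per-suite totals (suite, suite_total_sec) or per-case
# results (case, case_status, case_sec), always alongside the job name and sha.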
def download_stats(folder: str, lookback: int):
    commit_sha_list = _get_latest_git_commit_sha_list(lookback)
for commit_sha in commit_sha_list:
for key in tqdm(BUCKET.objects.filter(Prefix=f'test_time/{commit_sha}')):
remote_fname = key.key
local_fname = os.path.join(folder, remote_fname)
# TODO: Do this in parallel
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
                # only download when there's a cache miss
                print(f"\nDownloading {remote_fname}...")
                CLIENT.download_file("ossci-metrics", remote_fname, local_fname)
def parse_and_export_stats(folder: str, granularity: str, commit_sha_lists: Optional[List[str]] = None):
dataframe = None
for (dirpath, _, filenames) in os.walk(folder):
for filename in tqdm(filenames):
splits = dirpath.split("/")
job_name = splits[-1]
sha = splits[-2]
if not commit_sha_lists or sha in commit_sha_lists:
                with bz2.open(os.path.join(dirpath, filename), 'r') as zf:
                    string = zf.read().decode("utf-8")
                    data = json.loads(string)
                    # create a deep json with sha and job info
                    data['sha'] = sha
                    data['job'] = job_name
                    df = _json_to_df(data, granularity)
                    dataframe = df if dataframe is None else pd.concat([dataframe, df])
return dataframe
def main():
parser = argparse.ArgumentParser(
__file__,
description="download and cache test stats locally, both raw and pandas format",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'--lookback',
type=int,
help='lookback in # of hours',
default=24,
)
parser.add_argument(
'--output',
help='output filename',
default='cache/df.pickle',
)
parser.add_argument(
'--cache_folder',
help='cache folder',
default='cache',
)
parser.add_argument(
'--granularity',
choices=['file', 'suite', 'case'],
help='granularity of stats summary',
default='file',
)
args = parser.parse_args()
lookback = args.lookback
cache_folder = args.cache_folder
output = args.output
granularity = args.granularity
print("Downloading test stats")
download_stats(cache_folder, lookback)
print("Parsing test stats and write to pd dataframe")
if not os.path.exists(output):
dataframe = parse_and_export_stats(f'{cache_folder}/test_time/', granularity)
dataframe.to_pickle(output)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
from typing import Dict, List
from subprocess import check_output
import os
import sys
def get_defined_symbols(fname: str, verbose: bool = False) -> Dict[str, int]:
if verbose:
print(f"Processing {fname}...", end='', flush=True)
if sys.platform == 'darwin':
lines = check_output(['nm', '--defined-only', '-n', fname]).decode('ascii').split("\n")[:-1]
rc = {}
for idx, line in enumerate(lines):
addr, stype, name = line.split(' ')
size = 4 if idx + 1 == len(lines) else (int(lines[idx + 1].split(' ')[0], 16) - int(addr, 16))
rc[name] = size
else:
lines = check_output(['nm', '--print-size', '--defined-only', fname]).decode('ascii').split('\n')
rc = {e[3]: int(e[1], 16) for e in [line.split() for line in lines] if len(e) == 4}
if verbose:
print("done")
return rc
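# The non-macOS branch above assumes the usual `nm --print-size --defined-only`
# layout of four whitespace-separated fields per symbol, e.g. (illustrative line):
#   0000000000001139 000000000000002a T my_function
# i.e. address, size, type and name; lines with a different field count are skipped.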
def get_deps(fname: str) -> List[str]:
if sys.platform == 'darwin':
rc = []
lines = check_output(['otool', '-l', fname]).decode('ascii').split("\n")[1:-1]
for idx, line in enumerate(lines):
if line.strip() != 'cmd LC_LOAD_DYLIB':
continue
path = lines[idx + 2].strip()
assert path.startswith('name')
rc.append(os.path.basename(path.split(' ')[1]))
return rc
lines = check_output(['readelf', '--dynamic', fname]).decode('ascii').split('\n')
return [line.split('[')[1][:-1] for line in lines if '(NEEDED)' in line]
def humansize(size):
if size < 1024:
return f"{size} bytes"
    if size < 1024**2:
        return f"{int(size/1024)} KB"
    if size < 1024**3:
        return f"{size/(1024.0**2):.2f} MB"
    return f"{size/(1024.0**3):.2f} GB"
def print_sizes(libname, depth: int = 2) -> None:
    libs = [libname]
symbols = {os.path.basename(libname): get_defined_symbols(libname, verbose=True)}
for _ in range(depth):
for lib in libs:
dirname = os.path.dirname(lib)
for dep in get_deps(lib):
path = os.path.join(dirname, dep)
if not os.path.exists(path):
continue
if path not in libs:
libs.append(path)
symbols[dep] = get_defined_symbols(path, verbose=True)
for lib in libs:
lib_symbols = symbols[os.path.basename(lib)]
lib_keys = set(lib_symbols.keys())
rc = f"{lib} symbols size {humansize(sum(lib_symbols.values()))}"
for dep in get_deps(lib):
if dep not in symbols:
continue
dep_overlap = lib_keys.intersection(set(symbols[dep].keys()))
overlap_size = sum(lib_symbols[k] for k in dep_overlap)
if overlap_size > 0:
rc += f" {dep} overlap is {humansize(overlap_size)}"
print(rc)
def print_symbols_overlap(libname1: str, libname2: str) -> None:
sym1 = get_defined_symbols(libname1, verbose=True)
sym2 = get_defined_symbols(libname2, verbose=True)
sym1_size = sum(sym1.values())
sym2_size = sum(sym2.values())
sym_overlap = set(sym1.keys()).intersection(set(sym2.keys()))
overlap_size = sum(sym1[s] for s in sym_overlap)
if overlap_size == 0:
print(f"{libname1} symbols size {humansize(sym1_size)} does not overlap with {libname2}")
return
print(f"{libname1} symbols size {humansize(sym1_size)} overlap {humansize(overlap_size)} ({100.0 * overlap_size/sym1_size :.2f}%)")
for sym in sym_overlap:
print(sym)
if __name__ == '__main__':
if len(sys.argv) == 3:
print_symbols_overlap(sys.argv[1], sys.argv[2])
else:
print_sizes(sys.argv[1] if len(sys.argv) > 1 else "lib/libtorch_cuda.so")
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
from typing import Any, Dict, List, Iterable, Optional, Union
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import enum
import os
class IssueState(enum.Enum):
OPEN = "open"
CLOSED = "closed"
ALL = "all"
def __str__(self):
return self.value
class GitCommit:
commit_hash: str
title: str
body: str
author: str
author_date: datetime
commit_date: Optional[datetime]
def __init__(self,
commit_hash: str,
author: str,
author_date: datetime,
title: str,
body: str,
commit_date: Optional[datetime] = None) -> None:
self.commit_hash = commit_hash
self.author = author
self.author_date = author_date
self.commit_date = commit_date
self.title = title
self.body = body
def __contains__(self, item: Any) -> bool:
return item in self.body or item in self.title
def get_revert_revision(commit: GitCommit) -> Optional[str]:
import re
body_rc = re.search("Original Phabricator Diff: (D\\d+)", commit.body)
if commit.title.startswith("Back out \"") and body_rc is not None:
return body_rc.group(1)
rc = re.match("Revert (D\\d+):", commit.title)
if rc is None:
return None
return rc.group(1)
def get_diff_revision(commit: GitCommit) -> Optional[str]:
import re
rc = re.search("\\s*Differential Revision: (D\\d+)", commit.body)
if rc is None:
return None
return rc.group(1)
def get_ghf_revert_revision(commit: GitCommit) -> Optional[str]:
import re
rc = re.search("\\s*This reverts commit ([0-9a-f]+).", commit.body)
if all([
commit.title.startswith("Revert"),
commit.author == "PyTorch MergeBot <[email protected]>",
rc is not None
]):
return rc.group(1)
return None
def is_revert(commit: GitCommit) -> bool:
return get_revert_revision(commit) is not None or get_ghf_revert_revision(commit) is not None
def parse_medium_format(lines: Union[str, List[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=medium --date=unix` format, i.e.:
commit <sha1>
Author: <author>
Date: <author date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 5
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("Date: ")
assert len(lines[3]) == 0
return GitCommit(commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
title=lines[4].strip(),
body="\n".join(lines[5:]),
)
def parse_fuller_format(lines: Union[str, List[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=fuller --date=unix` format, i.e.:
commit <sha1>
Author: <author>
AuthorDate: <author date>
Commit: <committer>
CommitDate: <committer date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 7
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("AuthorDate: ")
assert lines[3].startswith("Commit: ")
assert lines[4].startswith("CommitDate: ")
assert len(lines[5]) == 0
return GitCommit(commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
commit_date=datetime.fromtimestamp(int(lines[4].split(":", 1)[1].strip())),
title=lines[6].strip(),
body="\n".join(lines[7:]),
)
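# Minimal illustration of the expected input (hashes, dates and text are made up):
#   parse_fuller_format([
#       "commit 0123abc",
#       "Author:     Jane Doe <[email protected]>",
#       "AuthorDate: 1650000000",
#       "Commit:     Jane Doe <[email protected]>",
#       "CommitDate: 1650000100",
#       "",
#       "    Fix a bug",
#       "",
#       "    Longer description of the change",
#   ])
# returns a GitCommit with title "Fix a bug" and the remaining lines as its body.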
def _check_output(items: List[str], encoding='utf-8') -> str:
from subprocess import check_output
return check_output(items).decode(encoding)
def get_git_remotes(path: str) -> Dict[str, str]:
keys = _check_output(["git", "-C", path, "remote"]).strip().split("\n")
return {key: _check_output(["git", "-C", path, "remote", "get-url", key]).strip() for key in keys}
class GitRepo:
def __init__(self, path, remote='upstream'):
self.repo_dir = path
self.remote = remote
def _run_git_cmd(self, *args) -> str:
return _check_output(['git', '-C', self.repo_dir] + list(args))
def _run_git_log(self, revision_range) -> List[GitCommit]:
log = self._run_git_cmd('log', '--format=fuller',
'--date=unix', revision_range, '--', '.').split("\n")
rc: List[GitCommit] = []
cur_msg: List[str] = []
for line in log:
if line.startswith("commit"):
if len(cur_msg) > 0:
rc.append(parse_fuller_format(cur_msg))
cur_msg = []
cur_msg.append(line)
if len(cur_msg) > 0:
rc.append(parse_fuller_format(cur_msg))
return rc
def get_commit_list(self, from_ref, to_ref) -> List[GitCommit]:
return self._run_git_log(f"{self.remote}/{from_ref}..{self.remote}/{to_ref}")
def get_ghstack_orig_branches(self) -> List[str]:
return [x.strip() for x in self._run_git_cmd("branch", "--remotes", "--list", self.remote + "/gh/*/orig").strip().split("\n")]
def show_ref(self, ref) -> str:
return self._run_git_cmd("show-ref", ref).split(" ")[0]
def merge_base(self, ref1, ref2) -> str:
return self._run_git_cmd("merge-base", ref1, ref2).strip()
def rev_list(self, ref):
return self._run_git_cmd("rev-list", f"{self.remote}/main..{ref}").strip().split()
def build_commit_dict(commits: List[GitCommit]) -> Dict[str, GitCommit]:
rc = {}
for commit in commits:
assert commit.commit_hash not in rc
rc[commit.commit_hash] = commit
return rc
def fetch_json(url: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
headers = {'Accept': 'application/vnd.github.v3+json'}
token = os.environ.get("GITHUB_TOKEN")
if token is not None and url.startswith('https://api.github.com/'):
headers['Authorization'] = f'token {token}'
if params is not None and len(params) > 0:
url += '?' + '&'.join(f"{name}={val}" for name, val in params.items())
try:
with urlopen(Request(url, headers=headers)) as data:
return json.load(data)
except HTTPError as err:
if err.code == 403 and all(key in err.headers for key in ['X-RateLimit-Limit', 'X-RateLimit-Used']):
print(f"Rate limit exceeded: {err.headers['X-RateLimit-Used']}/{err.headers['X-RateLimit-Limit']}")
raise
def fetch_multipage_json(url: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
if params is None:
params = {}
assert "page" not in params
page_idx, rc, prev_len, params = 1, [], -1, params.copy()
while len(rc) > prev_len:
prev_len = len(rc)
params["page"] = page_idx
page_idx += 1
rc += fetch_json(url, params)
return rc
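# Illustrative call against a paginated GitHub list endpoint (a GITHUB_TOKEN
# environment variable, if set, is picked up automatically by fetch_json):
#   commits = fetch_multipage_json(
#       "https://api.github.com/repos/pytorch/pytorch/commits",
#       {"per_page": 100},
#   )
# Pages keep being requested until a page adds no new items.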
def gh_get_milestones(org='pytorch', project='pytorch', state: IssueState = IssueState.OPEN) -> List[Dict[str, Any]]:
url = f'https://api.github.com/repos/{org}/{project}/milestones'
return fetch_multipage_json(url, {"state": state})
def gh_get_milestone_issues(org: str, project: str, milestone_idx: int, state: IssueState = IssueState.OPEN):
url = f'https://api.github.com/repos/{org}/{project}/issues'
return fetch_multipage_json(url, {"milestone": milestone_idx, "state": state})
def gh_get_ref_statuses(org: str, project: str, ref: str) -> Dict[str, Any]:
url = f'https://api.github.com/repos/{org}/{project}/commits/{ref}/status'
params = {"page": 1, "per_page": 100}
nrc = rc = fetch_json(url, params)
while "statuses" in nrc and len(nrc["statuses"]) == 100:
params["page"] += 1
nrc = fetch_json(url, params)
if "statuses" in nrc:
rc["statuses"] += nrc["statuses"]
return rc
def extract_statuses_map(json: Dict[str, Any]):
return {s["context"]: s["state"] for s in json["statuses"]}
class PeriodStats:
commits: int
reverts: int
authors: int
date: datetime
def __init__(self, date: datetime, commits: int, reverts: int, authors: int) -> None:
self.date = date
self.commits = commits
self.reverts = reverts
self.authors = authors
def get_monthly_stats(commits: List[GitCommit]) -> Iterable[PeriodStats]:
y, m, total, reverts, authors = None, None, 0, 0, set()
for commit in commits:
commit_date = commit.commit_date if commit.commit_date is not None else commit.author_date
if y != commit_date.year or m != commit_date.month:
if y is not None:
yield PeriodStats(datetime(y, m, 1), total, reverts, len(authors))
y, m, total, reverts, authors = commit_date.year, commit_date.month, 0, 0, set()
if is_revert(commit):
reverts += 1
total += 1
authors.add(commit.author)
def print_monthly_stats(commits: List[GitCommit]) -> None:
stats = list(get_monthly_stats(commits))
for idx, stat in enumerate(stats):
y = stat.date.year
m = stat.date.month
total, reverts, authors = stat.commits, stat.reverts, stat.authors
reverts_ratio = 100.0 * reverts / total
if idx + 1 < len(stats):
commits_growth = 100.0 * (stat.commits / stats[idx + 1].commits - 1)
else:
commits_growth = float('nan')
print(f"{y}-{m:02d}: commits {total} ({commits_growth:+.1f}%) reverts {reverts} ({reverts_ratio:.1f}%) authors {authors}")
def print_reverts(commits: List[GitCommit]) -> None:
for commit in commits:
if not is_revert(commit):
continue
print(f"{commit.commit_date} {commit.title} {commit.commit_hash} {commit.body}")
def analyze_reverts(commits: List[GitCommit]):
for idx, commit in enumerate(commits):
revert_id = get_revert_revision(commit)
if revert_id is None:
continue
        orig_commit = None
        # Look at the next 100 commits for the original diff that was reverted
        for candidate in commits[idx + 1: idx + 100]:
            if get_diff_revision(candidate) == revert_id:
                orig_commit = candidate
                break
        if orig_commit is None:
print(f"Failed to find original commit for {commit.title}")
continue
print(f"{commit.commit_hash} is a revert of {orig_commit.commit_hash}: {orig_commit.title}")
revert_statuses = gh_get_ref_statuses("pytorch", "pytorch", commit.commit_hash)
orig_statuses = gh_get_ref_statuses("pytorch", "pytorch", orig_commit.commit_hash)
orig_sm = extract_statuses_map(orig_statuses)
revert_sm = extract_statuses_map(revert_statuses)
for k in revert_sm.keys():
if k not in orig_sm:
continue
if orig_sm[k] != revert_sm[k]:
print(f"{k} {orig_sm[k]}->{revert_sm[k]}")
def print_contributor_stats(commits, delta: Optional[timedelta] = None) -> None:
authors: Dict[str, int] = {}
now = datetime.now()
# Default delta is one non-leap year
if delta is None:
delta = timedelta(days=365)
for commit in commits:
date, author = commit.commit_date, commit.author
if now - date > delta:
break
if author not in authors:
authors[author] = 0
authors[author] += 1
print(f"{len(authors)} contributors made {sum(authors.values())} commits in last {delta.days} days")
for count, author in sorted(((commit, author) for author, commit in authors.items()), reverse=True):
print(f"{author}: {count}")
def commits_missing_in_branch(repo: GitRepo, branch: str, orig_branch: str, milestone_idx: int) -> None:
def get_commits_dict(x, y):
return build_commit_dict(repo.get_commit_list(x, y))
main_commits = get_commits_dict(orig_branch, 'main')
release_commits = get_commits_dict(orig_branch, branch)
print(f"len(main_commits)={len(main_commits)}")
print(f"len(release_commits)={len(release_commits)}")
print("URL;Title;Status")
for issue in gh_get_milestone_issues('pytorch', 'pytorch', milestone_idx, IssueState.ALL):
html_url, state = issue["html_url"], issue["state"]
        # For closed issues, skip those whose fix landed before the branch cut
if state == "closed":
mentioned_after_cut = any(html_url in commit_message for commit_message in main_commits.values())
            # If the issue is not mentioned after the cut, it must already be present in the release branch
if not mentioned_after_cut:
continue
mentioned_in_release = any(html_url in commit_message for commit_message in release_commits.values())
        # If the issue is mentioned in the release branch, it was already cherry-picked
if mentioned_in_release:
continue
print(f'{html_url};{issue["title"]};{state}')
def analyze_stacks(repo: GitRepo) -> None:
from tqdm.contrib.concurrent import thread_map
branches = repo.get_ghstack_orig_branches()
stacks_by_author: Dict[str, List[int]] = {}
    for branch, rv_commits in thread_map(lambda x: (x, repo.rev_list(x)), branches, max_workers=10):
        author = branch.split("/")[2]
        if author not in stacks_by_author:
            stacks_by_author[author] = []
        stacks_by_author[author].append(len(rv_commits))
    for author, slen in sorted(stacks_by_author.items(), key=lambda x: len(x[1]), reverse=True):
        if len(slen) == 1:
            print(f"{author} has 1 stack of depth {slen[0]}")
            continue
        print(f"{author} has {len(slen)} stacks, max depth is {max(slen)}, avg depth is {sum(slen)/len(slen):.2f}, median is {sorted(slen)[len(slen)//2]}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Print GitHub repo stats")
parser.add_argument("--repo-path",
type=str,
help="Path to PyTorch git checkout",
default=os.path.expanduser("~/git/pytorch/pytorch"))
parser.add_argument("--milestone-id", type=str)
parser.add_argument("--branch", type=str)
parser.add_argument("--remote",
type=str,
help="Remote to base off of",
default="")
parser.add_argument("--analyze-reverts", action="store_true")
parser.add_argument("--print-reverts", action="store_true")
parser.add_argument("--contributor-stats", action="store_true")
parser.add_argument("--missing-in-branch", action="store_true")
parser.add_argument("--analyze-stacks", action="store_true")
return parser.parse_args()
def main():
import time
args = parse_arguments()
remote = args.remote
if not remote:
remotes = get_git_remotes(args.repo_path)
# Pick best remote
remote = next(iter(remotes.keys()))
for key in remotes:
if remotes[key].endswith('github.com/pytorch/pytorch'):
remote = key
repo = GitRepo(args.repo_path, remote)
if args.analyze_stacks:
analyze_stacks(repo)
return
if args.missing_in_branch:
        # Use the milestone id directly, or search for it among milestone titles
try:
milestone_idx = int(args.milestone_id)
except ValueError:
milestone_idx = -1
milestones = gh_get_milestones()
for milestone in milestones:
if milestone.get('title', '') == args.milestone_id:
milestone_idx = int(milestone.get('number', '-2'))
if milestone_idx < 0:
print(f'Could not find milestone {args.milestone_id}')
return
commits_missing_in_branch(repo,
args.branch,
f'orig/{args.branch}',
milestone_idx)
return
print(f"Parsing git history with remote {remote}...", end='', flush=True)
start_time = time.time()
x = repo._run_git_log(f"{remote}/main")
print(f"done in {time.time()-start_time:.1f} sec")
if args.analyze_reverts:
analyze_reverts(x)
elif args.contributor_stats:
print_contributor_stats(x)
elif args.print_reverts:
print_reverts(x[:2**9])
else:
print_monthly_stats(x)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""Helper script to package wheels and relocate binaries."""
import glob
import hashlib
import io
# Standard library imports
import os
import os.path as osp
import platform
import shutil
import subprocess
import sys
import zipfile
from base64 import urlsafe_b64encode
# Third party imports
if sys.platform == "linux":
from auditwheel.lddtree import lddtree
from wheel.bdist_wheel import get_abi_tag
ALLOWLIST = {
"libgcc_s.so.1",
"libstdc++.so.6",
"libm.so.6",
"libdl.so.2",
"librt.so.1",
"libc.so.6",
"libnsl.so.1",
"libutil.so.1",
"libpthread.so.0",
"libresolv.so.2",
"libX11.so.6",
"libXext.so.6",
"libXrender.so.1",
"libICE.so.6",
"libSM.so.6",
"libGL.so.1",
"libgobject-2.0.so.0",
"libgthread-2.0.so.0",
"libglib-2.0.so.0",
"ld-linux-x86-64.so.2",
"ld-2.17.so",
}
WINDOWS_ALLOWLIST = {
"MSVCP140.dll",
"KERNEL32.dll",
"VCRUNTIME140_1.dll",
"VCRUNTIME140.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
}
HERE = osp.dirname(osp.abspath(__file__))
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def rehash(path, blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
# unicode/str python2 issues
return (digest, str(length)) # type: ignore
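# rehash() produces the digest and length fields that compress_wheel() writes next
# to each path in the wheel's RECORD file, e.g. a row might look like (values are
# illustrative):
#   torchcsprng/version.py,sha256=pp-jEF5Y8S_di6PM2sQTtbQ1fkRcJ9hz5wyZIGEbQZU,88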
def unzip_file(file, dest):
"""Decompress zip `file` into directory `dest`."""
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest)
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None
On macOS systems, a .app is considered installed if
it exists.
"""
if sys.platform == "darwin" and basename.endswith(".app") and osp.exists(basename):
return basename
for path in os.environ["PATH"].split(os.pathsep):
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == "nt":
# Windows platforms
extensions = (".exe", ".bat", ".cmd", ".dll")
if not basename.endswith(extensions):
names = [basename + ext for ext in extensions] + [basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def patch_new_path(library_path, new_dir):
library = osp.basename(library_path)
name, *rest = library.split(".")
rest = ".".join(rest)
hash_id = hashlib.sha256(library_path.encode("utf-8")).hexdigest()[:8]
new_name = ".".join([name, hash_id, rest])
return osp.join(new_dir, new_name)
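# For example (hash shown is hypothetical), a dependency found at
#   /usr/lib/x86_64-linux-gnu/libpng16.so.16
# copied into the "torchcsprng.libs" directory would be renamed to something like
#   libpng16.1a2b3c4d.so.16
# so that differently-built copies of the same soname cannot collide.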
def find_dll_dependencies(dumpbin, binary):
out = subprocess.run([dumpbin, "/dependents", binary], stdout=subprocess.PIPE)
out = out.stdout.strip().decode("utf-8")
start_index = out.find("dependencies:") + len("dependencies:")
end_index = out.find("Summary")
dlls = out[start_index:end_index].strip()
dlls = dlls.split(os.linesep)
dlls = [dll.strip() for dll in dlls]
return dlls
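# This relies on the textual output of `dumpbin /dependents`: the slice between the
# "dependencies:" header and the "Summary" section is expected to be one DLL name
# per line, e.g. (illustrative):
#   VCRUNTIME140.dll
#   api-ms-win-crt-heap-l1-1-0.dll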
def relocate_elf_library(patchelf, output_dir, output_library, binary):
"""
Relocate an ELF shared library to be packaged on a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel while updating their respective rpaths.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
ld_tree = lddtree(binary_path)
tree_libs = ld_tree["libs"]
binary_queue = [(n, binary) for n in ld_tree["needed"]]
binary_paths = {binary: binary_path}
binary_dependencies = {}
while binary_queue != []:
library, parent = binary_queue.pop(0)
library_info = tree_libs[library]
print(library)
if library_info["path"] is None:
print("Omitting {0}".format(library))
continue
if library in ALLOWLIST:
# Omit glibc/gcc/system libraries
print("Omitting {0}".format(library))
continue
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_info["path"]
binary_queue += [(n, library) for n in library_info["needed"]]
print("Copying dependencies to wheel directory")
new_libraries_path = osp.join(output_dir, "torchcsprng.libs")
os.makedirs(new_libraries_path)
new_names = {binary: binary_path}
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = patch_new_path(library_path, new_libraries_path)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
new_names[library] = new_library_path
print("Updating dependency names by new files")
for library in binary_paths:
if library != binary:
if library not in binary_dependencies:
continue
library_dependencies = binary_dependencies[library]
new_library_name = new_names[library]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(library, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, new_library_name],
cwd=new_libraries_path,
)
print("Updating library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN", new_library_name],
cwd=new_libraries_path,
)
subprocess.check_output(
[patchelf, "--print-rpath", new_library_name], cwd=new_libraries_path
)
print("Update library dependencies")
library_dependencies = binary_dependencies[binary]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(binary, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, binary], cwd=output_library
)
print("Update library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN:$ORIGIN/../torchcsprng.libs", binary_path],
cwd=output_library,
)
def relocate_dll_library(dumpbin, output_dir, output_library, binary):
"""
Relocate a DLL/PE shared library to be packaged on a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
library_dlls = find_dll_dependencies(dumpbin, binary_path)
binary_queue = [(dll, binary) for dll in library_dlls]
binary_paths = {binary: binary_path}
binary_dependencies = {}
while binary_queue != []:
library, parent = binary_queue.pop(0)
if library in WINDOWS_ALLOWLIST or library.startswith("api-ms-win"):
print("Omitting {0}".format(library))
continue
library_path = find_program(library)
if library_path is None:
print("{0} not found".format(library))
continue
if osp.basename(osp.dirname(library_path)) == "system32":
continue
print("{0}: {1}".format(library, library_path))
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_path
downstream_dlls = find_dll_dependencies(dumpbin, library_path)
binary_queue += [(n, library) for n in downstream_dlls]
print("Copying dependencies to wheel directory")
package_dir = osp.join(output_dir, "torchcsprng")
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = osp.join(package_dir, library)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
def compress_wheel(output_dir, wheel, wheel_dir, wheel_name):
"""Create RECORD file and compress wheel distribution."""
print("Update RECORD file in wheel")
dist_info = glob.glob(osp.join(output_dir, "*.dist-info"))[0]
record_file = osp.join(dist_info, "RECORD")
with open(record_file, "w") as f:
for root, _, files in os.walk(output_dir):
for this_file in files:
full_file = osp.join(root, this_file)
rel_file = osp.relpath(full_file, output_dir)
if full_file == record_file:
f.write("{0},,\n".format(rel_file))
else:
digest, size = rehash(full_file)
f.write("{0},{1},{2}\n".format(rel_file, digest, size))
print("Compressing wheel")
base_wheel_name = osp.join(wheel_dir, wheel_name)
shutil.make_archive(base_wheel_name, "zip", output_dir)
os.remove(wheel)
shutil.move("{0}.zip".format(base_wheel_name), wheel)
shutil.rmtree(output_dir)
def patch_linux():
# Get patchelf location
patchelf = find_program("patchelf")
if patchelf is None:
raise FileNotFoundError(
"Patchelf was not found in the system, please"
" make sure that is available on the PATH."
)
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.so"
video_binary = "video_reader.so"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding ELF dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_elf_library(patchelf, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
def patch_win():
# Get dumpbin location
dumpbin = find_program("dumpbin")
if dumpbin is None:
raise FileNotFoundError(
"Dumpbin was not found in the system, please"
" make sure that is available on the PATH."
)
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.pyd"
video_binary = "video_reader.pyd"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding DLL/PE dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_dll_library(dumpbin, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
if __name__ == "__main__":
if sys.platform == "linux":
patch_linux()
elif sys.platform == "win32":
patch_win()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import random
import time
import unittest
import numpy as np
import torch
from Crypto.Cipher import AES
from Crypto.Util import Counter
from scipy import stats
try:
import torchcsprng as csprng
except ImportError:
raise RuntimeError("CSPRNG not available")
IS_SANDCASTLE = (
os.getenv("SANDCASTLE") == "1" or os.getenv("TW_JOB_USER") == "sandcastle"
)
IS_FBCODE = os.getenv("PYTORCH_TEST_FBCODE") == "1"
def to_numpy(t, dtype=torch.float):
if t.dtype == torch.bfloat16:
t = t.to(dtype)
return t.numpy()
def to_bytes(t):
if t.dtype == torch.bfloat16:
t = t.view(torch.int16)
return t.cpu().numpy().view(np.int8)
class TestCSPRNG(unittest.TestCase):
all_generators = [
csprng.create_random_device_generator(),
csprng.create_random_device_generator("/dev/urandom"),
csprng.create_mt19937_generator(),
csprng.create_mt19937_generator(42),
]
int_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
standard_fp_dtypes = [torch.float, torch.double]
non_standard_fp_dtypes = [torch.half, torch.bfloat16]
fp_dtypes = standard_fp_dtypes + non_standard_fp_dtypes
num_dtypes = int_dtypes + fp_dtypes
all_dtypes = num_dtypes + [torch.bool]
size = 1000
all_devices = (
["cpu", "cuda"]
if (torch.cuda.is_available() and csprng.supports_cuda())
else ["cpu"]
)
def test_random_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
if dtype == torch.float:
to_inc = 2**24
elif dtype == torch.double:
to_inc = 2**53
elif dtype == torch.half:
to_inc = 2**11
elif dtype == torch.bfloat16:
to_inc = 2**8
else:
to_inc = torch.iinfo(dtype).max
t = torch.empty(self.size, dtype=dtype, device=device).random_(
generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_inc)
)
self.assertTrue(res.statistic < 0.1)
no_cuda = not torch.cuda.is_available() or not csprng.supports_cuda()
no_cuda_message = (
"CUDA is not available or csprng was not compiled with CUDA support"
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_to_kstest(self):
to_ = 42
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
t = torch.zeros(self.size, dtype=dtype, device=device).random_(
to_, generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_)
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_to_cpu_vs_cuda(self):
to_ = 42
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(self.size, dtype=dtype, device="cpu").random_(
to_, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(self.size, dtype=dtype, device="cuda").random_(
to_, generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_from_to_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
t = torch.zeros(
self.size, dtype=dtype, device=device
).random_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu()),
stats.randint.cdf,
args=(from_, to_),
)
self.assertTrue(res.statistic < 0.2)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_from_to_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(
self.size, dtype=dtype, device="cpu"
).random_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(
self.size, dtype=dtype, device="cuda"
).random_(from_, to_, generator=gen)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_bool(self):
for device in self.all_devices:
for gen in self.all_generators:
t = torch.empty(self.size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
t.fill_(True)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_bool_cpu_vs_cuda(self):
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=torch.bool, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=torch.bool, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_uniform_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
t = torch.empty(
self.size, dtype=dtype, device=device
).uniform_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"uniform",
args=(from_, (to_ - from_)),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_uniform_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).uniform_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).uniform_(from_, to_, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"norm",
args=(mean, std),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").normal_(
mean=mean, std=std, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").normal_(
mean=mean, std=std, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_log_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).log_normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"lognorm",
args=(std, 0, math.exp(mean)),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_log_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).log_normal_(mean=mean, std=std, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).log_normal_(mean=mean, std=std, generator=gen)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-4, equal_nan=True)
)
def test_exponential_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).exponential_(lambd=lambd, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"expon",
args=(
0,
1 / lambd,
),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
@unittest.skip("https://github.com/pytorch/pytorch/issues/38662")
def test_exponential_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").exponential_(
lambd=lambd, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).exponential_(lambd=lambd, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_cauchy_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).cauchy_(median=median, sigma=sigma, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"cauchy",
args=(median, sigma),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_cauchy_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").cauchy_(
median=median, sigma=sigma, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").cauchy_(
median=median, sigma=sigma, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_geometric(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
t = torch.empty(
self.size, dtype=dtype, device=device
).geometric_(p=p, generator=gen)
# actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0]
# expected = stats.geom(p).pmf(np.arange(1, 99)) * self.size
# res = stats.chisquare(actual, expected)
# self.assertAlmostEqual(res.pvalue, 1.0, delta=0.5) TODO https://github.com/pytorch/csprng/issues/7
@unittest.skipIf(no_cuda, no_cuda_message)
def test_geometric_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").geometric_(
p=p, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").geometric_(
p=p, generator=gen
)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-9, equal_nan=True)
)
def test_non_contiguous_vs_contiguous(self):
size = 10
for device in self.all_devices:
for dtype in self.all_dtypes:
for i in range(10):
t = torch.zeros([size, size, size], dtype=dtype, device=device)
x1 = random.randrange(0, size)
y1 = random.randrange(0, size)
z1 = random.randrange(0, size)
x2 = random.randrange(x1 + 1, max(x1 + 2, size))
y2 = random.randrange(y1 + 1, max(y1 + 2, size))
z2 = random.randrange(z1 + 1, max(z1 + 2, size))
maybe_non_contiguous = t[x1:x2, y1:y2, z1:z2]
assert maybe_non_contiguous.numel() > 0
if not maybe_non_contiguous.is_contiguous():
seed = random.randrange(1000)
non_contiguous = maybe_non_contiguous
gen = csprng.create_mt19937_generator(seed)
non_contiguous.random_(generator=gen)
contiguous = torch.zeros_like(non_contiguous)
gen = csprng.create_mt19937_generator(seed)
contiguous.random_(generator=gen)
assert contiguous.is_contiguous()
self.assertTrue((non_contiguous == contiguous).all())
for x in range(0, size):
for y in range(0, size):
for z in range(0, size):
if (
not x1 <= x < x2
and not y1 <= y < y2
and not z1 <= z < z2
):
self.assertTrue(t[x, y, z] == 0)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
@unittest.skipIf(torch.get_num_threads() < 2, "requires multithreading CPU")
def test_cpu_parallel(self):
urandom_gen = csprng.create_random_device_generator("/dev/urandom")
def measure(size):
t = torch.empty(size, dtype=torch.float32, device="cpu")
start = time.time()
for i in range(20):
t.normal_(generator=urandom_gen)
finish = time.time()
return finish - start
time_for_1K = measure(1000)
time_for_1M = measure(1000000)
        # Pessimistic check that parallel execution gives >= 1.5x performance boost:
        # the 1M-element tensor is 1000x the work of the 1K one, so with at least a
        # 1.5x parallel speedup the runtime ratio should stay below 1000 / 1.5
self.assertTrue(time_for_1M / time_for_1K < 1000 / 1.5)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
def test_version(self):
self.assertTrue(csprng.__version__)
self.assertTrue(csprng.git_version)
def test_randperm(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.int_dtypes:
for size in range(0, 20):
expected = torch.arange(size, dtype=dtype, device=device)
actual = torch.randperm(
size, dtype=dtype, device=device, generator=gen
)
actual_out = torch.empty(1, dtype=dtype, device=device)
torch.randperm(size, out=actual_out, generator=gen)
if size >= 10:
self.assertTrue(not torch.allclose(expected, actual))
self.assertTrue(not torch.allclose(expected, actual_out))
actual = actual.sort()[0]
                        actual_out = actual_out.sort()[0]
self.assertTrue(torch.allclose(expected, actual))
self.assertTrue(torch.allclose(expected, actual_out))
def test_encrypt_decrypt(self):
key_size_bytes = 16
block_size_bytes = 16
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def pad(data, pad_size):
if len(data) % pad_size == 0:
return data
length = pad_size - (len(data) % pad_size)
return data + bytes([0]) * length
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size in [0, 4, 8, 15, 16, 23, 42]:
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_size_bytes = initial_size * sizeof(initial_dtype)
for encrypted_dtype in self.all_dtypes:
encrypted_size = (
(initial_size_bytes + block_size_bytes - 1)
// block_size_bytes
* block_size_bytes
// sizeof(encrypted_dtype)
)
encrypted = torch.zeros(encrypted_size, dtype=encrypted_dtype)
for decrypted_dtype in self.all_dtypes:
decrypted_size = (
initial_size_bytes + sizeof(decrypted_dtype) - 1
) // sizeof(decrypted_dtype)
decrypted = torch.zeros(
decrypted_size, dtype=decrypted_dtype
)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
encrypted = encrypted.to(device)
decrypted = decrypted.to(device)
csprng.encrypt(
initial, encrypted, key, "aes128", mode
)
encrypted_np = to_bytes(encrypted)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(
pad(initial_np.tobytes(), block_size_bytes)
),
dtype=np.int8,
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
csprng.decrypt(
encrypted, decrypted, key, "aes128", mode
)
decrypted_np = to_bytes(decrypted)[
:initial_size_bytes
]
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(
pad(
encrypted_np.tobytes(), block_size_bytes
)
),
dtype=np.int8,
)[:initial_size_bytes]
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np, decrypted_np)
)
def test_encrypt_decrypt_inplace(self):
key_size_bytes = 16
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size_bytes in [0, 16, 256]:
initial_size = initial_size_bytes // sizeof(initial_dtype)
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_np_copy = np.copy(initial_np)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
csprng.encrypt(initial, initial, key, "aes128", mode)
encrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(initial_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
encrypted_np_copy = np.copy(encrypted_np)
csprng.decrypt(initial, initial, key, "aes128", mode)
decrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(encrypted_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np_copy, decrypted_np)
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchcsprng._C import *
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
|
import accimage
import numpy as np
import imageio
import os
ACCIMAGE_SAVE = os.environ.get('ACCIMAGE_SAVE', '')
if len(ACCIMAGE_SAVE) and ACCIMAGE_SAVE.lower() not in {'0', 'false', 'no'}:
SAVE_IMAGES = True
else:
SAVE_IMAGES = False
def image_to_np(image):
"""
Returns:
        np.ndarray: Image converted to array with shape (height, width, channels)
"""
image_np = np.empty([image.channels, image.height, image.width], dtype=np.uint8)
image.copyto(image_np)
image_np = np.transpose(image_np, (1, 2, 0))
return image_np
def save_image(path, image):
imageio.imwrite(path, image_to_np(image))
def test_reading_image():
image = accimage.Image("chicago.jpg")
if SAVE_IMAGES:
save_image('test_reading_image.jpg', image)
assert image.width == 1920
assert image.height == 931
def test_reading_image_from_memory():
from_file = accimage.Image("chicago.jpg")
    with open("chicago.jpg", "rb") as f:
        image_bytes = f.read()
    from_bytes = accimage.Image(image_bytes)
if SAVE_IMAGES:
save_image('test_reading_image_from_memory.jpg', from_bytes)
assert from_bytes.width == 1920
assert from_bytes.height == 931
np.testing.assert_array_equal(image_to_np(from_file), image_to_np(from_bytes))
def test_resizing():
image = accimage.Image("chicago.jpg")
image.resize(size=(200, 200))
if SAVE_IMAGES:
save_image('test_resizing.jpg', image)
assert image.width == 200
assert image.height == 200
def test_cropping():
image = accimage.Image("chicago.jpg")
image.crop(box=(50, 50, 150, 150))
if SAVE_IMAGES:
save_image('test_cropping.jpg', image)
assert image.width == 100
assert image.height == 100
def test_flipping():
image = accimage.Image("chicago.jpg")
original_image_np = image_to_np(image)
FLIP_LEFT_RIGHT = 0
image.transpose(FLIP_LEFT_RIGHT)
if SAVE_IMAGES:
save_image('test_flipping.jpg', image)
new_image_np = image_to_np(image)
assert image.width == 1920
assert image.height == 931
np.testing.assert_array_equal(new_image_np[:, ::-1, :], original_image_np)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Obtains credentials and passes them as CLI args to stack invocation
"""
import os
import argparse
import json
import subprocess
import sys
import tools.deployment.args_assembly as args_assembly
THIS_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
DEFAULT_CREDENTIALS_DIRECTORY = os.path.join(THIS_DIRECTORY, "../circleci-failure-tracker-credentials")
def parse_args():
parser = argparse.ArgumentParser(description='Run webapp locally')
parser.add_argument('--github-app-pem-filepath', dest='github_app_pem_filepath',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "circleci-failure-attribution.private-key.pem"),
                        help='File containing the GitHub App private key (PEM format)')
parser.add_argument('--circleci-api-token-file', dest='circleci_api_token_file',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "circleci-api-token.txt"),
                        help='File containing the CircleCI API token')
parser.add_argument('--aws-sqs-queue-url-file', dest='aws_sqs_queue_url_file',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "aws-sqs-queue-url.txt"),
help='File containing AWS SQS queue URL')
# Note: the "local" credentials use "github-client-id" and "github-client-secret" for
# the GitHub app named "circleci-failure-attribution-dev", while
# the "remote" credentials use a client id and secret for the GitHub app named "circleci-failure-attribution".
# The local credentials should be used along with ngrok
# (or something similar, like localtunnel: https://localtunnel.github.io/www/) for exposing the app
# on a local port.
    parser.add_argument('--prod-app', dest='prod_app', action="store_true", help='For production deployment (default is local). Implies --prod-db')
parser.add_argument('--prod-db', dest='prod_db', action="store_true", help='Use production (remote) database (default is local)')
parser.add_argument('--credentials-json-basedir', dest='credentials_json_basedir',
default=DEFAULT_CREDENTIALS_DIRECTORY,
help='Path to JSON file containing various webapp credentials')
parser.add_argument('--dockerrun-json-output-path', dest='dockerrun_json',
default="Dockerrun.aws.json",
help='Path to write Dockerrun.aws.json file')
parser.add_argument('--no-force-ssl', dest='no_force_ssl', action="store_true", help='Do not force SSL redirection in args placed into Dockerrun.aws.json')
parser.add_argument('--port-override', dest='port_override', type=int, help='Override of local port')
parser.add_argument('--entrypoint', dest='entrypoint_override', help='Entrypoint binary name (excluding leading path) for Dockerrun.aws.json')
parser.add_argument('--notification-ingester', dest='notification_ingester', action="store_true", help='Build for the notification ingester application')
parser.add_argument('--gitdir', dest='repo_gitdir', help='PyTorch git directory')
parser.add_argument('--oneoff', dest='run_oneoff', action='store_true', help='Run oneoff test suite')
return parser.parse_args()
def gen_credentials_filename(is_db, is_remote, suffix=None):
credential_type = "database" if is_db else "app"
locality_suffix = "remote" if is_remote else "local"
arglist = [credential_type, "credentials", locality_suffix]
if suffix:
arglist.append(suffix)
return "-".join(arglist) + ".json"
if __name__ == "__main__":
options = parse_args()
if options.run_oneoff and options.repo_gitdir is None:
print("--gitdir must be defined to run oneoff unittests")
sys.exit(-1)
using_prod_db = options.prod_app or options.prod_db
app_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(False, options.prod_app))
db_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(True, using_prod_db))
db_mview_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(True, using_prod_db, "mview-refresher"))
with open(app_credentials_json_path) as fh_app, open(db_credentials_json_path) as fh_db, open(db_mview_credentials_json_path) as fh_mview_db:
github_app_pem_content = open(options.github_app_pem_filepath).read().strip()
circleci_api_token = open(options.circleci_api_token_file).read().strip()
aws_sqs_queue_url = open(options.aws_sqs_queue_url_file).read().strip()
nondefault_cli_arglist = args_assembly.generate_app_nondefault_cli_arglist(
json.load(fh_app),
json.load(fh_db),
json.load(fh_mview_db),
github_app_pem_content,
circleci_api_token,
aws_sqs_queue_url,
options.notification_ingester,
options.no_force_ssl,
options.port_override,
run_oneoff=options.run_oneoff)
if options.prod_app:
args_assembly.generate_dockerrun_aws_json(options.dockerrun_json, nondefault_cli_arglist, options.entrypoint_override)
else:
os.system('find -name "*.tix" -delete')
default_binary_name = args_assembly.ONEOFF_BINARY_NAME if options.run_oneoff else args_assembly.WEBAPP_BINARY_NAME
binary_name = options.entrypoint_override if options.entrypoint_override else default_binary_name
cli_args = [
"stack",
"run",
binary_name,
"--",
] + ([
"--local",
"--data-path",
"static",
] if binary_name != args_assembly.ONEOFF_BINARY_NAME else [
"--repo-git-dir",
options.repo_gitdir,
]) + nondefault_cli_arglist
command_string = " ".join(cli_args)
print("Executing command:", command_string)
subprocess.check_call(cli_args, cwd="app")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import subprocess
import argparse
import requests
def get_linear_commits(repo_path):
"""
Returns the most recent sequence of commits
that have a linear ancestry, ordered from oldest to newest
"""
command_args = [
"git",
"rev-list",
"--parents",
"origin/master",
]
command_string = " ".join(command_args)
print("Command:", command_string)
output = subprocess.check_output(command_args, cwd=repo_path)
linear_commits = []
for line in output.decode('utf-8').splitlines():
stripped = line.strip()
splitted = stripped.split()
if len(splitted) > 2:
print("First merge commit: " + str(splitted))
break
else:
linear_commits.append(splitted[0])
return list(reversed(linear_commits))
def upload_commits(hostname, auth_token, commits):
url = hostname + '/api/populate-master-commits'
headers_dict = {
'content-type': 'application/json',
'token': auth_token,
}
r = requests.post(url, verify=False, json=commits, headers=headers_dict)
print(r.json())
print(r.status_code)
def parse_args():
parser = argparse.ArgumentParser(description='Fetch master commits')
parser.add_argument('--repo-path', dest='repo_path', required=True, help='PyTorch repo path')
parser.add_argument('--token', dest='token', required=True, help='GitHub auth token')
parser.add_argument('--hostname', dest='hostname', required=True, help='Server hostname')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
linear_commits = get_linear_commits(options.repo_path)
print("Count:", len(linear_commits))
upload_commits(options.hostname, options.token, linear_commits)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import subprocess
import argparse
import requests
import json
PARENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
def get_first_merge_commit(repo_path):
"""
    Returns the SHA1 of the most recent merge commit on origin/master, i.e. the
    first commit (walking backwards from the tip) that has more than one parent.
"""
command_args = [
"git",
"rev-list",
"--parents",
"origin/master",
]
command_string = " ".join(command_args)
# print("Command:", command_string)
output = subprocess.check_output(command_args, cwd=repo_path)
for line in output.decode('utf-8').splitlines():
stripped = line.strip()
splitted = stripped.split()
if len(splitted) > 2:
return splitted[0]
def get_metadata_aspect(repo_path, commit_sha1, format_specifier):
my_command = "git log --format=" + format_specifier + " -n 1 " + commit_sha1
commit_message = subprocess.check_output(my_command, cwd=repo_path, shell=True)
    # Decode so the value is JSON-serializable when uploaded later.
    return commit_message.decode('utf-8').strip()
KEYS_AND_FORMAT_SPECIFIERS = {
"message": "%B",
"sha1": "%H",
"subject": "%f",
"tree_sha1": "%T",
"author_name": "%an",
"author_email": "%aE",
"author_date": "%ai",
"committer_name": "%cN",
"committer_email": "%cE",
"committer_date": "%ci",
}
def get_all_metadata_aspects(repo_path, commit_sha1):
return {k: get_metadata_aspect(repo_path, commit_sha1, v) for k, v in KEYS_AND_FORMAT_SPECIFIERS.items()}
def get_log_json_list(repo_path, merge_commit, maybe_single_commit):
"""
    Returns a list of commit-metadata dictionaries, one per commit between
    ``merge_commit`` (exclusive) and origin/master, or for the single commit
    given by ``maybe_single_commit`` when it is provided.
"""
command_args = [
os.path.join(PARENT_DIRECTORY, "git-log2json.sh"),
]
if maybe_single_commit:
command_args.extend(["-n1", maybe_single_commit])
else:
command_args.append(merge_commit + ".." + "origin/master")
# print("command: " + " ".join(command_args))
output = subprocess.check_output(command_args, cwd=repo_path)
old_json = json.loads(output)
# Get sanitized commit messages
new_json = []
for i, item in enumerate(old_json):
print("progress: %d/%d" % (i + 1, len(old_json)))
commit_sha1 = item["sha1"]
item["message"] = get_metadata_aspect(repo_path, commit_sha1, "%B")
new_json.append(item)
return new_json
def upload_commits(hostname, auth_token, commits):
url = hostname + '/api/populate-master-commit-metadata'
headers_dict = {
'content-type': 'application/json',
'token': auth_token,
}
r = requests.post(url, verify=False, json=commits, headers=headers_dict)
print(r.json())
print(r.status_code)
def get_last_excluded_commit(options):
if options.from_scratch:
return get_first_merge_commit(options.repo_path)
else:
print("Determining latest commit that has metadata...")
url = options.hostname + '/api/latest-master-commit-with-metadata'
r = requests.get(url, verify=False)
parsed_json = r.json()
print(parsed_json)
if parsed_json["success"]:
return parsed_json["payload"]
else:
return get_first_merge_commit(options.repo_path)
def get_commit_infos_from_list(options):
# First, update the repo
# 'git fetch --force origin "refs/pull/*:refs/remotes/origin/pr/*"'
url = options.hostname + '/api/broken-commits-without-metadata'
r = requests.get(url, verify=False)
parsed_json = r.json()
print("Getting metdata for %d commits..." % len(parsed_json))
metadata_list = []
failed_sha1s = []
for i, commit_sha1 in enumerate(parsed_json):
print("Progress: %d/%d" % (i + 1, len(parsed_json)))
try:
metadata_list.append(get_all_metadata_aspects(options.repo_path, commit_sha1))
        except Exception:
print("Skipping", commit_sha1)
failed_sha1s.append(commit_sha1)
return metadata_list
def parse_args():
parser = argparse.ArgumentParser(description='Fetch master commits')
parser.add_argument('--repo-path', dest='repo_path', required=True, help='PyTorch repo path')
parser.add_argument('--token', dest='token', required=True, help='GitHub auth token')
parser.add_argument('--hostname', dest='hostname', required=True, help='Server hostname')
parser.add_argument('--from-scratch', dest='from_scratch', action="store_true", help='Populate the database from scratch')
parser.add_argument('--single-commit', dest='single_commit', help='Single commit to retrieve')
parser.add_argument('--commit-list-from-api', dest='commit_list_from_api', action="store_true", help='Get list of commits from API')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
if options.commit_list_from_api:
commit_list_json = get_commit_infos_from_list(options)
else:
merge_commit = get_last_excluded_commit(options)
print("Starting (excluded) commit:", merge_commit)
commit_list_json = get_log_json_list(options.repo_path, merge_commit, options.single_commit)
print("Populating metadata for", len(commit_list_json), "commits...")
upload_commits(options.hostname, options.token, commit_list_json)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from optparse import OptionParser, OptionGroup
import pygraphviz as pgv
import psycopg2
import sys
# Query found here:
# https://stackoverflow.com/a/46594226/105137
def writedeps(conn):
sql = """WITH RECURSIVE view_deps AS (
SELECT DISTINCT dependent_ns.nspname as dependent_schema
, dependent_view.relname as dependent_view
, source_ns.nspname as source_schema
, source_table.relname as source_table
FROM pg_depend
JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
JOIN pg_class as dependent_view ON pg_rewrite.ev_class = dependent_view.oid
JOIN pg_class as source_table ON pg_depend.refobjid = source_table.oid
JOIN pg_namespace dependent_ns ON dependent_ns.oid = dependent_view.relnamespace
JOIN pg_namespace source_ns ON source_ns.oid = source_table.relnamespace
WHERE NOT (dependent_ns.nspname = source_ns.nspname AND dependent_view.relname = source_table.relname)
UNION
SELECT DISTINCT dependent_ns.nspname as dependent_schema
, dependent_view.relname as dependent_view
, source_ns.nspname as source_schema
, source_table.relname as source_table
FROM pg_depend
JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
JOIN pg_class as dependent_view ON pg_rewrite.ev_class = dependent_view.oid
JOIN pg_class as source_table ON pg_depend.refobjid = source_table.oid
JOIN pg_namespace dependent_ns ON dependent_ns.oid = dependent_view.relnamespace
JOIN pg_namespace source_ns ON source_ns.oid = source_table.relnamespace
INNER JOIN view_deps vd
ON vd.dependent_schema = source_ns.nspname
AND vd.dependent_view = source_table.relname
AND NOT (dependent_ns.nspname = vd.dependent_schema AND dependent_view.relname = vd.dependent_view)
)
SELECT *
FROM view_deps
WHERE dependent_schema = 'public'
ORDER BY source_schema, source_table;"""
G=pgv.AGraph(directed=True)
with conn.cursor() as cursor:
cursor.execute(sql)
for row in cursor.fetchall():
dependent_schema, dependent_view, source_schema, source_table = row
print('"%s" -> "%s";' % (dependent_view, source_table))
G.add_edge(dependent_view, source_table, color='blue')
G.layout(prog='dot')
G.draw('view-dependencies.png')
def main():
parser = OptionParser()
group = OptionGroup(parser, "Database Options")
group.add_option("--dbname", action="store", dest="dbname",
help="The database name.")
group.add_option("--dbhost", action="store", dest="dbhost",
default="localhost", help="The database host.")
group.add_option("--dbuser", action="store", dest="dbuser",
help="The database username.")
group.add_option("--dbpass", action="store", dest="dbpass",
help="The database password.")
parser.add_option_group(group)
(options, args) = parser.parse_args()
if not options.dbname:
print("Please supply a database name, see --help for more info.")
sys.exit(1)
try:
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'"
% (options.dbname, options.dbuser, options.dbhost, options.dbpass))
writedeps(conn)
except psycopg2.OperationalError as e:
print("Failed to connect to database,",)
print("perhaps you need to supply auth details:\n %s" % str(e))
print("Use --help for more info.")
sys.exit(1)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from timeit import default_timer as timer
import logging
import boto3
from botocore.exceptions import ClientError
import time
import urllib.request
import zipfile
import io
MAX_LOG_URLS_RETRIEVAL_RETRIES = 5
LOG_DIR_PREFIX = "var/log/eb-docker/containers/eb-current-app"
def process_zip_file(file_obj, worker_instance_id):
relevant_lines_for_log = []
with zipfile.ZipFile(file_obj) as zip_ref:
log_files = filter(lambda info: info.filename.startswith(LOG_DIR_PREFIX), zip_ref.infolist())
sorted_log_files_list = sorted(log_files, key=lambda x: x.date_time, reverse=True)
# Only examine one log per zip file
for info in sorted_log_files_list[:1]:
with zip_ref.open(info) as log_fh:
log_lines = log_fh.readlines()
with open(worker_instance_id + ".log", "wb") as output_fh:
for line in log_lines:
output_fh.write(line)
for line in log_lines:
line_string = line.decode('UTF-8').strip()
if line_string.startswith("Posted to: /worker/scan-sha1"):
relevant_lines_for_log.append(line_string)
return relevant_lines_for_log
def get_eb_worker_logs(eb_environment_id):
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-2')
try:
msg = eb_client.request_environment_info(
EnvironmentId=eb_environment_id,
InfoType='bundle',
)
print("First message:", msg)
for i in range(MAX_LOG_URLS_RETRIEVAL_RETRIES):
msg2 = eb_client.retrieve_environment_info(
EnvironmentId=eb_environment_id,
InfoType='bundle',
)
environment_info_list = msg2.get("EnvironmentInfo", [])
if environment_info_list:
log_timestamp_url_tuples_by_instance_id = {}
for log_item in environment_info_list:
s3_url = log_item['Message']
log_timestamp = log_item['SampleTimestamp']
ec2_instance_id = log_item['Ec2InstanceId']
log_timestamp_url_tuples_by_instance_id.setdefault(ec2_instance_id, []).append((log_timestamp, s3_url, ec2_instance_id))
log_timestamp_url_tuples = list(map(lambda x: x[0], sorted(log_timestamp_url_tuples_by_instance_id.values(), key=lambda x: x[0], reverse=True)))
print("Log URL count:", len(log_timestamp_url_tuples))
return log_timestamp_url_tuples
else:
print("Environment info was empty on iteration %d. Sleeping..." % i)
time.sleep(5)
except ClientError as e:
logging.error(e)
return None
def run():
start = timer()
log_list = get_eb_worker_logs('e-ev8fq2dhbv')
    # get_eb_worker_logs returns None on a ClientError, so guard the iteration.
    for timestamp, url, instance_id in log_list or []:
print("timestamp:", timestamp)
print("url:", url)
with urllib.request.urlopen(url) as download_file_obj:
in_memory_file = io.BytesIO(download_file_obj.read())
relevant_lines = process_zip_file(in_memory_file, instance_id)
for i, line in enumerate(relevant_lines):
print("\t", i, ":", line)
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
if __name__ == "__main__":
run()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import base64
WEBAPP_BINARY_NAME = "my-webapp"
ONEOFF_BINARY_NAME = "scan-oneoff"
WEBAPP_INTERNAL_PORT = 3001
def generate_dockerrun_aws_json(output_path, nondefault_cli_arglist, entrypoint_override=None):
json_object = {
"AWSEBDockerrunVersion": "1",
"Image": {
"Name": "kostmo/circleci-failure-tracker-img-small-my-webapp",
},
"Ports": [
{
"ContainerPort": WEBAPP_INTERNAL_PORT,
}
],
"Entrypoint": os.path.join("/opt/app", entrypoint_override if entrypoint_override else WEBAPP_BINARY_NAME),
"Command": " ".join(nondefault_cli_arglist),
}
with open(output_path, "w") as fh:
json.dump(json_object, fh, indent=4, sort_keys=True)
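# A minimal sketch of the resulting Dockerrun.aws.json (the Command value is
# illustrative; the rest is derived from the fields populated above, with keys
# sorted as json.dump(..., sort_keys=True) would emit them):
#
# {
#     "AWSEBDockerrunVersion": "1",
#     "Command": "--db-hostname example-host --db-username dr_ci ...",
#     "Entrypoint": "/opt/app/my-webapp",
#     "Image": {"Name": "kostmo/circleci-failure-tracker-img-small-my-webapp"},
#     "Ports": [{"ContainerPort": 3001}]
# }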
def generate_app_nondefault_cli_arglist(
app_credentials_json,
db_credentials_json,
db_mview_credentials_json,
github_app_pem_content,
circleci_api_token,
aws_sqs_queue_url,
is_notification_ingester,
no_force_ssl,
port_override,
run_oneoff = False):
arg_list = [
"--db-hostname",
db_credentials_json["db-hostname"],
"--db-username",
db_credentials_json["db-user"],
"--db-password",
db_credentials_json["db-password"],
"--github-app-rsa-pem",
base64.b64encode(github_app_pem_content.encode('ascii')).decode(),
"--aws-sqs-queue-url",
aws_sqs_queue_url,
"--circleci-api-token",
circleci_api_token,
]
if run_oneoff:
return arg_list
arg_list += [
"--github-client-id",
app_credentials_json["github-client-id"],
"--github-client-secret",
app_credentials_json["github-client-secret"],
"--github-webhook-secret",
app_credentials_json["github-webhook-secret"],
"--db-mview-username",
db_mview_credentials_json["db-user"],
"--db-mview-password",
db_mview_credentials_json["db-password"],
"--admin-password",
app_credentials_json["admin-password"],
]
if no_force_ssl:
arg_list.append("--no-force-ssl")
if port_override:
arg_list.extend(["--port", str(port_override)])
return arg_list
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import sys
import argparse
import json
CURRENT_DIR = os.path.dirname(__file__)
REPO_ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, "../.."))
def parse_args():
parser = argparse.ArgumentParser(description='Manipulate VIEWs in the database')
parser.add_argument('--backup', dest='backup', action="store_true", help='Backs up the views. When false, creates the views in the database.')
return parser.parse_args()
def get_view_names():
view_names = []
schema_filepath = os.path.join(REPO_ROOT_DIR, "configuration/schema.sql")
for line in open(schema_filepath):
        matches = re.search(r"CREATE VIEW public\.(\S+) ", line)
if matches:
view_names.append(matches.group(1))
return view_names
def get_db_hostname():
with open(os.path.join(REPO_ROOT_DIR, "../circleci-failure-tracker-credentials/database-credentials-remote.json")) as fh:
data = json.load(fh)
return data["db-hostname"]
SCRIPT_PATH = "view-creation.sql"
def dump_view_creation_script():
view_names = get_view_names()
print("There are", len(view_names), "views.")
db_hostname = get_db_hostname()
cli_args = [
"pg_dump",
"-h",
db_hostname,
"-s",
"-U",
"postgres",
"-d",
"loganci",
]
for v in view_names:
cli_args.extend(["-t", v])
cli_args.extend([">", SCRIPT_PATH])
cli_string = " ".join(cli_args)
print("CLI string:", cli_string)
os.system(cli_string)
def run_view_creation_script():
# psql --no-password -U postgres -h $DB_HOSTNAME < ../configuration/schema.sql
db_hostname = get_db_hostname()
cli_args = [
"psql",
"--no-password",
"-U",
"postgres",
"-h",
db_hostname,
"-d",
"loganci",
"<",
SCRIPT_PATH,
]
cli_string = " ".join(cli_args)
print("CLI string:", cli_string)
os.system(cli_string)
if __name__ == "__main__":
options = parse_args()
if options.backup:
dump_view_creation_script()
else:
run_view_creation_script()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, sys
import json
THIS_SCRIPT_DIR = os.path.dirname(__file__)
CREDENTIALS_DIR = os.path.join(THIS_SCRIPT_DIR, "../../../circleci-failure-tracker-credentials")
def write_creds_module(source_filename, destination_filename):
json_source_path = os.path.join(CREDENTIALS_DIR, source_filename)
github_token_path = os.path.join(CREDENTIALS_DIR, "github-personal-access-token-repo-read-permissions.txt")
sqs_queue_url_path = os.path.join(CREDENTIALS_DIR, "aws-sqs-queue-url.txt")
dest_path = os.path.join("dr_ci_view_refresh", destination_filename)
with open(json_source_path) as json_fh, open(dest_path, "w") as output_module_fh:
creds_dict = json.load(json_fh)
output_module_fh.write('# This file is autogenerated!\n')
output_module_fh.write('db_hostname = "%s"\n' % creds_dict["db-hostname"])
output_module_fh.write('db_username = "%s"\n' % creds_dict["db-user"])
output_module_fh.write('db_password = "%s"\n' % creds_dict["db-password"])
output_module_fh.write('db_name = "%s"\n' % "loganci")
with open(github_token_path) as token_fh:
access_token = token_fh.read().strip()
output_module_fh.write('repo_read_auth_token = "%s"\n' % access_token)
with open(sqs_queue_url_path) as fh:
queue_url = fh.read().strip()
output_module_fh.write('sqs_queue_url = "%s"\n' % queue_url)
if __name__ == "__main__":
write_creds_module("database-credentials-remote-mview-refresher.json", "db_config.py")
write_creds_module("database-credentials-remote.json", "logan_db_config.py")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
from multiprocessing.pool import ThreadPool
import db_config
def view_refresh_lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
view_names = event["view-names"]
payload = update_multiple_views(view_names, "lambda")
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": payload,
}),
}
def update_multiple_views(view_names, event_source):
def long_running_func(view_name):
return {
"view": view_name,
"result": run(view_name, event_source),
}
p = ThreadPool(2)
return p.map(long_running_func, view_names)
WHITELISTED_VIEW_NAMES = {
"master_failures_raw_causes_mview",
"upstream_breakages_weekly_aggregation_mview",
"job_schedule_statistics_mview",
"master_failures_weekly_aggregation_mview",
"job_schedule_discriminated_mview",
"master_ordered_commits_with_metadata_mview",
"master_commit_job_success_completeness_mview",
"master_job_failure_spans_mview",
"master_job_failure_spans_conservative_mview",
"master_commit_reversion_spans_mview",
"master_required_unbuilt_jobs_mview",
"pattern_frequency_summary_mview",
"pr_merge_time_build_stats_by_master_commit_mview",
}
def run(view_name, trigger_source):
print("Now refreshing materialized view:", view_name)
    # A whitelist is required here to avoid SQL injection, since the view name is
    # interpolated directly into the REFRESH statement below. The Lambda endpoint
    # should not be externally accessible, so inputs are normally trusted, but the
    # check is cheap insurance.
if view_name not in WHITELISTED_VIEW_NAMES:
return {"error": "Unsupported view name " + view_name}
conn = psycopg2.connect(host=db_config.db_hostname, database=db_config.db_name, user=db_config.db_username, password=db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;') # 3 seconds
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*10)) # 10 minutes
print("Refresh begins now...")
start = timer()
# CONCURRENTLY is very important; it allows queries to be performed at the same time
# as the view is being refreshed (which happens very often).
# However, it does require a unique index to exist on the view.
cur.execute('REFRESH MATERIALIZED VIEW CONCURRENTLY %s;' % view_name)
# cur.execute('REFRESH MATERIALIZED VIEW %s;' % view_name)
end = timer()
execution_seconds = end - start
print("Refresh completed in ", execution_seconds, "seconds")
cur.execute('INSERT INTO lambda_logging.materialized_view_refresh_events (view_name, execution_duration_seconds, event_source) VALUES (%s, %s, %s);', (view_name, execution_seconds, trigger_source))
conn.commit()
print("Inserted operation record for", view_name, "refresh into database.")
return {
"elapsed_time_seconds": execution_seconds,
}
if __name__ == "__main__":
view_names = [
# "job_schedule_discriminated_mview",
# "master_ordered_commits_with_metadata_mview",
# "master_commit_reversion_spans_mview",
"master_ordered_commits_with_metadata_mview",
]
payload = update_multiple_views(view_names, "test")
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
import logging
import boto3
from botocore.exceptions import ClientError
import logan_db_config
def record_queue_depth_lambda_handler(event, context):
my_payload = run()
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": my_payload,
}),
}
def get_queue_depth(sqs_queue_url):
sqs_client = boto3.client('sqs', region_name='us-east-2')
try:
msg = sqs_client.get_queue_attributes(
QueueUrl=sqs_queue_url,
AttributeNames=['ApproximateNumberOfMessages'],
)
except ClientError as e:
logging.error(e)
return None
return msg['Attributes']['ApproximateNumberOfMessages']
def run():
print("Now connecting to database...")
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;')
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*3)) # 3 minutes
print("Work begins now...")
start = timer()
queue_depth = get_queue_depth(logan_db_config.sqs_queue_url)
cur.execute("INSERT INTO lambda_logging.sqs_queue_depth_history (queue_depth) VALUES (%s) RETURNING inserted_at;", (queue_depth,))
insertion_timestamp = cur.fetchone()[0]
conn.commit()
print("Inserted queue depth at %s..." % insertion_timestamp)
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
return queue_depth
if __name__ == "__main__":
run()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
import logging
import logan_db_config
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
def record_master_viability_lambda_handler(event, context):
my_payload = run()
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": my_payload,
}),
}
def run():
print("Now connecting to database...")
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;')
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60)) # 1 minute
print("Work begins now...")
start = timer()
cur.execute("SELECT snapshot_master_viable_commit_age();")
conn.commit()
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
return {
"elapsed_time_seconds": execution_seconds,
}
if __name__ == "__main__":
payload = run()
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reads the "expanded" version of .circleci/config.yml from an arbitrary
build of each commit from the "master" branch of the pytorch repo and
inserts workflow jobs and properties into the Dr. CI database.
"""
import psycopg2
import psycopg2.extras
import yaml
import json
import os
import requests
import subprocess
import hashlib
import argparse
import logan_db_config
CONFIG_YAML_SHA1_API_URL_TEMPLATE = "https://api.github.com/repos/pytorch/pytorch/contents/.circleci/config.yml?ref=%s"
def populate_config_yaml_lambda_handler(event, context):
payload = run(25)
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": payload,
}),
}
def get_config_yaml_content_sha1(local_repo_path, commit_sha1):
"""
Uses auth token to avoid GitHub API rate limiting
"""
if local_repo_path:
try:
file_hash = subprocess.check_output([
"git",
"--git-dir",
local_repo_path,
"rev-parse",
"%s:.circleci/config.yml" % commit_sha1,
]).decode('utf-8').strip()
return file_hash
except Exception as e:
print("Couldn't obtain file SHA1 from local repo:", str(e))
print("\tFetching content SHA1 from GitHub...")
repo_sha1_retrieval_url = CONFIG_YAML_SHA1_API_URL_TEMPLATE % commit_sha1
headers = {
'Authorization': 'token %s' % logan_db_config.repo_read_auth_token,
}
repo_request = requests.get(repo_sha1_retrieval_url, headers=headers)
github_api_response_json = repo_request.json()
sha1 = github_api_response_json.get("sha")
if not sha1:
print("PROBLEM:", github_api_response_json)
return sha1
def populate_db_yaml_records(cur, build_number, repo_yaml_content_sha1):
url = "https://circleci.com/api/v1.1/project/github/pytorch/pytorch/%d" % build_number
r = requests.get(url)
api_json_obj = r.json()
yaml_text = api_json_obj.get("circle_yml").get("string")
expanded_yaml_md5 = hashlib.md5(yaml_text.encode('utf-8')).hexdigest()
yaml_obj = yaml.safe_load(yaml_text)
workflows_dict = yaml_obj.get("workflows")
cur.execute(
'INSERT INTO circleci_config_yaml_hashes (expanded_yaml_content, expanded_yaml_md5, repo_yaml_sha1) VALUES (%s, %s, %s);',
(yaml_text, expanded_yaml_md5, repo_yaml_content_sha1)
)
branch_filters_by_job_by_workflow = {}
jobs_insertion_values = []
# (workflow, dependent_job, required_job)
job_dependency_tuples = []
schedule_insertion_values = []
for workflow_name, workflow_obj in filter(lambda x: x[0] != "version", workflows_dict.items()):
if type(workflow_obj) is dict:
cur.execute(
'INSERT INTO circleci_workflows_by_yaml_file (yaml_content_sha1, name) VALUES (%s, %s) RETURNING id;',
(repo_yaml_content_sha1, workflow_name)
)
workflow_id = cur.fetchone()[0]
cron_values = []
for trigger in workflow_obj.get("triggers", []):
schedule_obj = trigger.get("schedule", {})
for k, v in schedule_obj.items():
if k == "cron":
cron_values.append(v)
for v in cron_values:
schedule_insertion_values.append((workflow_id, v))
branch_filters_by_job = branch_filters_by_job_by_workflow.setdefault(workflow_id, {})
for job_obj in workflow_obj.get("jobs", []):
if type(job_obj) is dict:
job_name = list(job_obj.keys())[0]
for key_job_name, job_value_obj in job_obj.items():
branch_filter_only_obj = job_value_obj.get("filters", {}).get("branches", {}).get("only")
if type(branch_filter_only_obj) is list:
branch_filters_by_job.setdefault(job_name, []).extend(branch_filter_only_obj)
elif type(branch_filter_only_obj) is str:
branch_filters_by_job.setdefault(job_name, []).append(branch_filter_only_obj)
for required_job in job_value_obj.get("requires", []):
job_dependency_tuples.append((workflow_id, key_job_name, required_job))
else:
job_name = job_obj
jobs_insertion_values.append((workflow_id, job_name))
jobs_insert_query = 'INSERT INTO circleci_workflow_jobs (workflow, job_name) VALUES %s'
psycopg2.extras.execute_values(
cur, jobs_insert_query, jobs_insertion_values, template=None, page_size=100
)
dependencies_insert_query = 'INSERT INTO circleci_config_job_dependencies (workflow, dependent_job, required_job) VALUES %s'
psycopg2.extras.execute_values(
cur, dependencies_insert_query, job_dependency_tuples, template=None, page_size=100
)
filter_insertion_values = []
for workflow_id, branch_filters_by_job in branch_filters_by_job_by_workflow.items():
for job_name, filters_list in branch_filters_by_job.items():
filter_insertion_values.extend([(workflow_id, job_name, branch, True) for branch in filters_list])
insert_query2 = 'INSERT INTO circleci_job_branch_filters (workflow, job_name, branch, filter_include) VALUES %s'
psycopg2.extras.execute_values(
cur, insert_query2, filter_insertion_values, template=None, page_size=100
)
schedule_insert_query = 'INSERT INTO circleci_workflow_schedules (workflow, cron_schedule) VALUES %s'
psycopg2.extras.execute_values(
cur, schedule_insert_query, schedule_insertion_values, template=None, page_size=100
)
def populate_config_info(local_repo_path, cur, commit_sha1, build_number):
repo_yaml_content_sha1 = get_config_yaml_content_sha1(local_repo_path, commit_sha1)
if repo_yaml_content_sha1:
cur.execute(
"SELECT repo_yaml_sha1 FROM circleci_config_yaml_hashes WHERE repo_yaml_sha1=%s LIMIT 1;",
(repo_yaml_content_sha1,)
)
row = cur.fetchone()
if not row:
print("\tInserting workflow into database...")
populate_db_yaml_records(cur, build_number, repo_yaml_content_sha1)
else:
print("\tWorkflow is already in database.")
print("\tInserting git-commit/config.yaml association into database...")
cur.execute(
'INSERT INTO circleci_expanded_config_yaml_hashes_by_commit (commit_sha1, repo_yaml_sha1) VALUES (%s, %s);',
(commit_sha1, repo_yaml_content_sha1)
)
print("\tInserted git-commit/config.yaml association into database.")
else:
print("Couldn't retrieve file content sha1 for commit %s!" % commit_sha1)
def run(commit_count, local_repo_path=None):
if local_repo_path:
return_code = subprocess.call([
"git",
"--git-dir",
local_repo_path,
"fetch",
"origin",
"master",
])
print("Fetched local git repo with return code: %d" % return_code)
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;') # 3 seconds
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*3)) # 3 minutes
cur.execute("SELECT sha1, build_num FROM master_commits_unpopulated_circleci_configs LIMIT %s;", (commit_count,))
rows = cur.fetchall()
enumerated_rows = list(enumerate(rows))
def single_commit_populator(args_tuple):
(i, (commit_sha1, build_number)) = args_tuple
print("%d/%d: Populating CircleCI config for commit %s using build #%d..." % (i + 1, len(enumerated_rows), commit_sha1, build_number))
populate_config_info(local_repo_path, cur, commit_sha1, build_number)
# We don't allow concurrent actions here, since we don't want two Git commits
# with the same config.yml hash to race in database insertion.
#
# We commit the transaction after every row, so that we can make incremental progress
# even if the overall task fails.
for x in enumerated_rows:
single_commit_populator(x)
conn.commit()
return {
"foo": "bar",
}
def parse_args():
parser = argparse.ArgumentParser(description='Parse config.yml files for revisions of the pytorch repo')
parser.add_argument('--repo-path', dest='local_repo_path',
default=os.path.expanduser("~/github/pytorch-repos/pytorch/.git"),
help='Local filesystem path to pytorch repo .git directory')
parser.add_argument('--commit-count', dest='commit_count',
type=int,
default=2,
help='How many commits to retrieve')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
    payload = run(options.commit_count, options.local_repo_path)
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import pytest
from hello_world import app
@pytest.fixture()
def apigw_event():
""" Generates API GW Event"""
return {
"body": '{ "test": "body"}',
"resource": "/{proxy+}",
"requestContext": {
"resourceId": "123456",
"apiId": "1234567890",
"resourcePath": "/{proxy+}",
"httpMethod": "POST",
"requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
"accountId": "123456789012",
"identity": {
"apiKey": "",
"userArn": "",
"cognitoAuthenticationType": "",
"caller": "",
"userAgent": "Custom User Agent String",
"user": "",
"cognitoIdentityPoolId": "",
"cognitoIdentityId": "",
"cognitoAuthenticationProvider": "",
"sourceIp": "127.0.0.1",
"accountId": "",
},
"stage": "prod",
},
"queryStringParameters": {"foo": "bar"},
"headers": {
"Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
"Accept-Language": "en-US,en;q=0.8",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Mobile-Viewer": "false",
"X-Forwarded-For": "127.0.0.1, 127.0.0.2",
"CloudFront-Viewer-Country": "US",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Upgrade-Insecure-Requests": "1",
"X-Forwarded-Port": "443",
"Host": "1234567890.execute-api.us-east-1.amazonaws.com",
"X-Forwarded-Proto": "https",
"X-Amz-Cf-Id": "aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==",
"CloudFront-Is-Tablet-Viewer": "false",
"Cache-Control": "max-age=0",
"User-Agent": "Custom User Agent String",
"CloudFront-Forwarded-Proto": "https",
"Accept-Encoding": "gzip, deflate, sdch",
},
"pathParameters": {"proxy": "/examplepath"},
"httpMethod": "POST",
"stageVariables": {"baz": "qux"},
"path": "/examplepath",
}
def test_lambda_handler(apigw_event, mocker):
ret = app.lambda_handler(apigw_event, "")
data = json.loads(ret["body"])
assert ret["statusCode"] == 200
assert "message" in ret["body"]
assert data["message"] == "hello world"
# assert "location" in data.dict_keys()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#
# Tests :
# For all images
# can import torch and its version == required one
# can import ignite and its version == required one
# for all -vision images
# can import opencv without driver issue
# for all horovod images
# can import horovod and its version == required one
# for all msdp images
# can import deepspeed and its version == required one
#
# Requirements:
# pip install docker
#
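#
# Example invocation (the image tag is hypothetical; the script expects a tag of
# the form "<name>/<image_type>:<torch_version>-<ignite_version>"):
#
#   python check_base_exist.py pytorchignite/vision:1.12.1-0.4.10
#
# The script name above is illustrative as well; use whatever filename this
# module is saved under.
#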
import argparse
import json
import os
import docker
def run_python_cmd(cmd):
try_except_cmd = f"""
import warnings
warnings.filterwarnings("ignore")
def main():
{cmd}
try:
main()
except Exception as e:
import traceback
print(traceback.format_exc())
"""
try:
out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True)
assert isinstance(out, bytes), type(out)
out = out.decode("utf-8").strip()
out_lower = out.lower()
if any([k in out_lower for k in ["error", "exception"]]):
raise RuntimeError(out)
except docker.errors.ContainerError as e:
raise RuntimeError(e)
return out
base_cmd = """
import torch
import ignite
result = dict()
result["torch"] = torch.__version__
result["ignite"] = ignite.__version__
{hvd}
{msdp}
print(result)
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser("Check docker image script")
parser.add_argument("image", type=str, help="Docker image to check")
args = parser.parse_args()
client = docker.from_env()
docker_image_name = args.image
name, version = docker_image_name.split(":")
assert version != "latest", version
torch_version, ignite_version = version.split("-")
_, image_type = name.split("/")
expected_out = {
"torch": torch_version,
"ignite": ignite_version,
}
hvd_cmd = ""
if "hvd" in image_type:
hvd_cmd = 'import horovod; result["hvd"] = horovod.__version__'
assert "HVD_VERSION" in os.environ
val = os.environ["HVD_VERSION"]
expected_out["hvd"] = val if val[0] != "v" else val[1:]
msdp_cmd = ""
if "msdp" in image_type:
msdp_cmd = 'import deepspeed; result["msdp"] = deepspeed.__version__'
assert "MSDP_VERSION" in os.environ
val = os.environ["MSDP_VERSION"]
expected_out["msdp"] = val if val[0] != "v" else val[1:]
cmd = base_cmd.format(hvd=hvd_cmd, msdp=msdp_cmd)
out = run_python_cmd(cmd)
try:
out = out.replace("'", '"')
out = json.loads(out)
except json.decoder.JSONDecodeError:
raise RuntimeError(out)
for k, v in expected_out.items():
assert k in out, f"{k} not in {out.keys()}"
assert v in out[k], f"{v} not in {out[k]}"
if "vision" in image_type:
run_python_cmd("import cv2")
if "nlp" in image_type:
run_python_cmd("import torchtext, transformers")
if "apex" in image_type:
run_python_cmd("import apex")
|
import ignite.contrib
import ignite.distributed
import ignite.engine
import ignite.exceptions
import ignite.handlers
import ignite.metrics
import ignite.utils
__version__ = "0.5.0"
|
import collections.abc as collections
import functools
import hashlib
import logging
import random
import shutil
import warnings
from pathlib import Path
from typing import Any, Callable, cast, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union
import torch
__all__ = [
"convert_tensor",
"apply_to_tensor",
"apply_to_type",
"to_onehot",
"setup_logger",
"manual_seed",
"hash_checkpoint",
]
def convert_tensor(
x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Move tensors to relevant device.
Args:
x: input tensor or mapping, or sequence of tensors.
device: device type to move ``x``.
non_blocking: convert a CPU Tensor with pinned memory to a CUDA Tensor
asynchronously with respect to the host if possible
"""
def _func(tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor
return apply_to_tensor(x, _func)
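# A minimal usage sketch (assuming a CUDA device is available; the structure is
# illustrative): convert_tensor({"x": torch.zeros(2), "y": [torch.ones(3)]},
# device="cuda", non_blocking=True) returns the same nested dict/list structure
# with every contained tensor moved to the GPU.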
def apply_to_tensor(
x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Apply a function on a tensor or mapping, or sequence of tensors.
Args:
x: input tensor or mapping, or sequence of tensors.
func: the function to apply on ``x``.
"""
return apply_to_type(x, torch.Tensor, func)
def apply_to_type(
x: Union[Any, collections.Sequence, collections.Mapping, str, bytes],
input_type: Union[Type, Tuple[Type[Any], Any]],
func: Callable,
) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:
"""Apply a function on an object of `input_type` or mapping, or sequence of objects of `input_type`.
Args:
x: object or mapping or sequence.
input_type: data type of ``x``.
func: the function to apply on ``x``.
"""
if isinstance(x, input_type):
return func(x)
if isinstance(x, (str, bytes)):
return x
if isinstance(x, collections.Mapping):
return cast(Callable, type(x))({k: apply_to_type(sample, input_type, func) for k, sample in x.items()})
if isinstance(x, tuple) and hasattr(x, "_fields"): # namedtuple
return cast(Callable, type(x))(*(apply_to_type(sample, input_type, func) for sample in x))
if isinstance(x, collections.Sequence):
return cast(Callable, type(x))([apply_to_type(sample, input_type, func) for sample in x])
raise TypeError((f"x must contain {input_type}, dicts or lists; found {type(x)}"))
def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
"""Convert a tensor of indices of any shape `(N, ...)` to a
tensor of one-hot indicators of shape `(N, num_classes, ...)` and of type uint8. Output's device is equal to the
input's device`.
Args:
indices: input tensor to convert.
num_classes: number of classes for one-hot tensor.
.. versionchanged:: 0.4.3
        This function is now torchscriptable.
"""
new_shape = (indices.shape[0], num_classes) + indices.shape[1:]
onehot = torch.zeros(new_shape, dtype=torch.uint8, device=indices.device)
return onehot.scatter_(1, indices.unsqueeze(1), 1)
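# For example: to_onehot(torch.tensor([0, 2, 1]), num_classes=3) returns a
# (3, 3) uint8 tensor whose rows are [1, 0, 0], [0, 0, 1] and [0, 1, 0].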
def setup_logger(
name: Optional[str] = "ignite",
level: int = logging.INFO,
stream: Optional[TextIO] = None,
format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
filepath: Optional[str] = None,
distributed_rank: Optional[int] = None,
reset: bool = False,
) -> logging.Logger:
"""Setups logger: name, level, format etc.
Args:
name: new name for the logger. If None, the standard logger is used.
level: logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG.
stream: logging stream. If None, the standard stream is used (sys.stderr).
format: logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`.
filepath: Optional logging file path. If not None, logs are written to the file.
        distributed_rank: Optional rank in a distributed configuration, used to avoid logger setup for workers.
            If None, ``distributed_rank`` is initialized to the rank of the current process.
reset: if True, reset an existing logger rather than keep format, handlers, and level.
Returns:
logging.Logger
Examples:
Improve logs readability when training with a trainer and evaluator:
.. code-block:: python
from ignite.utils import setup_logger
trainer = ...
evaluator = ...
trainer.logger = setup_logger("trainer")
evaluator.logger = setup_logger("evaluator")
trainer.run(data, max_epochs=10)
# Logs will look like
# 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
# 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
# 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
# 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
# ...
Every existing logger can be reset if needed
.. code-block:: python
logger = setup_logger(name="my-logger", format="=== %(name)s %(message)s")
logger.info("first message")
setup_logger(name="my-logger", format="+++ %(name)s %(message)s", reset=True)
logger.info("second message")
# Logs will look like
# === my-logger first message
# +++ my-logger second message
Change the level of an existing internal logger
.. code-block:: python
setup_logger(
name="ignite.distributed.launcher.Parallel",
level=logging.WARNING
)
.. versionchanged:: 0.4.3
Added ``stream`` parameter.
.. versionchanged:: 0.4.5
Added ``reset`` parameter.
"""
# check if the logger already exists
existing = name is None or name in logging.root.manager.loggerDict
# if existing, get the logger otherwise create a new one
logger = logging.getLogger(name)
if distributed_rank is None:
import ignite.distributed as idist
distributed_rank = idist.get_rank()
# Remove previous handlers
if distributed_rank > 0 or reset:
if logger.hasHandlers():
for h in list(logger.handlers):
logger.removeHandler(h)
if distributed_rank > 0:
# Add null handler to avoid multiple parallel messages
logger.addHandler(logging.NullHandler())
# Keep the existing configuration if not reset
if existing and not reset:
return logger
if distributed_rank == 0:
logger.setLevel(level)
formatter = logging.Formatter(format)
ch = logging.StreamHandler(stream=stream)
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
if filepath is not None:
fh = logging.FileHandler(filepath)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
    # Don't propagate to ancestor loggers; the problem here is that handlers are
    # attached to this logger directly. Should we provide a less open default
    # configuration?
if name is not None:
logger.propagate = False
return logger
def manual_seed(seed: int) -> None:
"""Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).
Args:
seed: Random state seed
.. versionchanged:: 0.4.3
Added ``torch.cuda.manual_seed_all(seed)``.
.. versionchanged:: 0.4.5
Added ``torch_xla.core.xla_model.set_rng_state(seed)``.
"""
random.seed(seed)
torch.manual_seed(seed)
try:
import torch_xla.core.xla_model as xm
xm.set_rng_state(seed)
except ImportError:
pass
try:
import numpy as np
np.random.seed(seed)
except ImportError:
pass
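# Hypothetical usage sketch (the helper name is illustrative, not part of the library):
# re-seeding with ``manual_seed`` makes subsequent random draws reproducible.
# Assumes only ``torch``, imported above.
def _manual_seed_example() -> None:
    manual_seed(42)
    first = torch.rand(3)
    manual_seed(42)
    second = torch.rand(3)
    # the same seed yields identical tensors on the same device
    assert torch.equal(first, second)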
def deprecated(
deprecated_in: str, removed_in: str = "", reasons: Tuple[str, ...] = (), raise_exception: bool = False
) -> Callable:
F = TypeVar("F", bound=Callable[..., Any])
def decorator(func: F) -> F:
func_doc = func.__doc__ if func.__doc__ else ""
deprecation_warning = (
f"This function has been deprecated since version {deprecated_in}"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:
if raise_exception:
raise DeprecationWarning(deprecation_warning)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
appended_doc = f".. deprecated:: {deprecated_in}" + ("\n\n\t" if len(reasons) > 0 else "")
for reason in reasons:
appended_doc += "\n\t- " + reason
wrapper.__doc__ = f"**Deprecated function**.\n\n {func_doc}{appended_doc}"
return cast(F, wrapper)
return decorator
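# Hypothetical usage sketch (``_old_helper`` and ``new_helper`` are illustrative names):
# applying the ``deprecated`` decorator above. Calling the decorated function emits a
# ``DeprecationWarning`` (or raises it when ``raise_exception=True``), and the listed
# ``reasons`` are appended to its docstring as a ``.. deprecated::`` note.
@deprecated("0.4.5", removed_in="0.6.0", reasons=("Use a hypothetical `new_helper` instead.",))
def _old_helper(x: int) -> int:
    """Double the input."""
    return 2 * x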
def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path]) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file
            (will be created if it does not exist).
Returns:
        Path to the hashed checkpoint file and its SHA256 hash (the first 8 hex digits of the hash are used in the filename).
.. versionadded:: 0.4.8
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
raise FileNotFoundError(f"{checkpoint_path.name} does not exist in {checkpoint_path.parent}.")
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
hash_obj = hashlib.sha256()
# taken from https://github.com/pytorch/vision/blob/main/references/classification/utils.py
with checkpoint_path.open("rb") as f:
# Read and update hash string value in blocks of 4KB
for byte_block in iter(lambda: f.read(4096), b""):
hash_obj.update(byte_block)
sha_hash = hash_obj.hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
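# Hypothetical usage sketch (paths and names are illustrative): hashing a saved
# state_dict so it can later be verified with ``check_hash=True`` in
# :func:`torch.hub.load_state_dict_from_url`. Note that ``hash_checkpoint`` moves
# (not copies) the original file into ``output_dir``.
def _hash_checkpoint_example(tmp_dir: str = "/tmp/ignite_ckpt") -> None:
    model = torch.nn.Linear(4, 2)
    Path(tmp_dir).mkdir(parents=True, exist_ok=True)
    ckpt = Path(tmp_dir) / "model.pt"
    torch.save(model.state_dict(), ckpt)
    hashed_path, sha = hash_checkpoint(ckpt, Path(tmp_dir) / "hashed")
    # e.g. .../hashed/model-1a2b3c4d.pt together with the full SHA256 hex digest
    print(hashed_path, sha[:8])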
|
__all__ = ["NotComputableError"]
class NotComputableError(RuntimeError):
"""
Exception class to raise if Metric cannot be computed.
"""
|
# For compatibility
from ignite.utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot
__all__ = ["apply_to_tensor", "apply_to_type", "convert_tensor", "to_onehot"]
|
from typing import Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanSquaredError"]
class MeanSquaredError(Metric):
r"""Calculates the `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
.. math:: \text{MSE} = \frac{1}{N} \sum_{i=1}^N \|y_{i} - x_{i}\|^2
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanSquaredError()
metric.attach(default_evaluator, 'mse')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mse'])
.. testoutput::
3.828125
"""
_state_dict_all_req_keys = ("_sum_of_squared_errors", "_num_examples")
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_squared_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
squared_errors = torch.pow(y_pred - y.view_as(y_pred), 2)
self._sum_of_squared_errors += torch.sum(squared_errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_squared_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanSquaredError must have at least one example before it can be computed.")
return self._sum_of_squared_errors.item() / self._num_examples
|
from typing import Callable, Union
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers.timing import Timer
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
class Frequency(Metric):
"""Provides metrics for the number of examples processed per second.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. code-block:: python
# Compute number of tokens processed
wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps')
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
# Progress bar will look like
# Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
To compute examples processed per second every 50th iteration:
.. code-block:: python
# Compute number of tokens processed
wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
# Progress bar will look like
# Epoch [2/10]: [50/100] 50%|█████ , wps=400 [00:17<00:35]
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
) -> None:
super(Frequency, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._timer = Timer()
self._acc = 0
self._n = 0
self._elapsed = 0.0
super(Frequency, self).reset()
@reinit__is_reduced
def update(self, output: int) -> None:
self._acc += output
self._n = self._acc
self._elapsed = self._timer.value()
@sync_all_reduce("_n", "_elapsed")
def compute(self) -> float:
time_divisor = 1.0
if idist.get_world_size() > 1:
time_divisor *= idist.get_world_size()
# Returns the average processed objects per second across all workers
return self._n / self._elapsed * time_divisor
def completed(self, engine: Engine, name: str) -> None:
engine.state.metrics[name] = int(self.compute())
# TODO: see issue https://github.com/pytorch/ignite/issues/1405
def attach( # type: ignore
self, engine: Engine, name: str, event_name: Events = Events.ITERATION_COMPLETED
) -> None:
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MultiLabelConfusionMatrix"]
class MultiLabelConfusionMatrix(Metric):
"""Calculates a confusion matrix for multi-labelled, multi-class data.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must contain 0s and 1s and has the following shape (batch_size, num_classes, ...).
For example, `y_pred[i, j]` = 1 denotes that the j'th class is one of the labels of the i'th sample as predicted.
- `y` should have the following shape (batch_size, num_classes, ...) with 0s and 1s. For example,
`y[i, j]` = 1 denotes that the j'th class is one of the labels of the i'th sample according to the ground truth.
- both `y` and `y_pred` must be torch Tensors having any of the following types:
{torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64}. They must have the same dimensions.
- The confusion matrix 'M' is of dimension (num_classes, 2, 2).
* M[i, 0, 0] corresponds to count/rate of true negatives of class i
* M[i, 0, 1] corresponds to count/rate of false positives of class i
* M[i, 1, 0] corresponds to count/rate of false negatives of class i
* M[i, 1, 1] corresponds to count/rate of true positives of class i
- The classes present in M are indexed as 0, ... , num_classes-1 as can be inferred from above.
Args:
num_classes: Number of classes, should be > 1.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
normalized: whether to normalize confusion matrix by its sum or not.
Example:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MultiLabelConfusionMatrix(num_classes=3)
metric.attach(default_evaluator, "mlcm")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["mlcm"])
.. testoutput::
tensor([[[0, 4],
[0, 1]],
[[3, 1],
[0, 1]],
[[1, 2],
[2, 0]]])
.. versionadded:: 0.4.5
"""
_state_dict_all_req_keys = ("confusion_matrix", "_num_examples")
def __init__(
self,
num_classes: int,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
normalized: bool = False,
):
if num_classes <= 1:
raise ValueError("Argument num_classes needs to be > 1")
self.num_classes = num_classes
self._num_examples = 0
self.normalized = normalized
super(MultiLabelConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.confusion_matrix = torch.zeros(self.num_classes, 2, 2, dtype=torch.int64, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_input(output)
y_pred, y = output[0].detach(), output[1].detach()
self._num_examples += y.shape[0]
y_reshaped = y.transpose(0, 1).reshape(self.num_classes, -1)
y_pred_reshaped = y_pred.transpose(0, 1).reshape(self.num_classes, -1)
y_total = y_reshaped.sum(dim=1)
y_pred_total = y_pred_reshaped.sum(dim=1)
tp = (y_reshaped * y_pred_reshaped).sum(dim=1)
fp = y_pred_total - tp
fn = y_total - tp
tn = y_reshaped.shape[1] - tp - fp - fn
self.confusion_matrix += torch.stack([tn, fp, fn, tp], dim=1).reshape(-1, 2, 2).to(self._device)
@sync_all_reduce("confusion_matrix", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("Confusion matrix must have at least one example before it can be computed.")
if self.normalized:
conf = self.confusion_matrix.to(dtype=torch.float64)
sums = conf.sum(dim=(1, 2))
return conf / sums[:, None, None]
return self.confusion_matrix
def _check_input(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() < 2:
raise ValueError(
f"y_pred must at least have shape (batch_size, num_classes (currently set to {self.num_classes}), ...)"
)
if y.ndimension() < 2:
raise ValueError(
f"y must at least have shape (batch_size, num_classes (currently set to {self.num_classes}), ...)"
)
if y_pred.shape[0] != y.shape[0]:
raise ValueError(f"y_pred and y have different batch size: {y_pred.shape[0]} vs {y.shape[0]}")
if y_pred.shape[1] != self.num_classes:
raise ValueError(f"y_pred does not have correct number of classes: {y_pred.shape[1]} vs {self.num_classes}")
if y.shape[1] != self.num_classes:
raise ValueError(f"y does not have correct number of classes: {y.shape[1]} vs {self.num_classes}")
if y.shape != y_pred.shape:
raise ValueError("y and y_pred shapes must match.")
valid_types = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
if y_pred.dtype not in valid_types:
raise ValueError(f"y_pred must be of any type: {valid_types}")
if y.dtype not in valid_types:
raise ValueError(f"y must be of any type: {valid_types}")
if not torch.equal(y_pred, y_pred**2):
raise ValueError("y_pred must be a binary tensor")
if not torch.equal(y, y**2):
raise ValueError("y must be a binary tensor")
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["TopKCategoricalAccuracy"]
class TopKCategoricalAccuracy(Metric):
"""
Calculates the top-k categorical accuracy.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
k: the k in “top-k”.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
def process_function(engine, batch):
y_pred, y = batch
return y_pred, y
def one_hot_to_binary_output_transform(output):
y_pred, y = output
y = torch.argmax(y, dim=1) # one-hot vector to label index vector
return y_pred, y
engine = Engine(process_function)
metric = TopKCategoricalAccuracy(
k=2, output_transform=one_hot_to_binary_output_transform)
metric.attach(engine, 'top_k_accuracy')
preds = torch.tensor([
[0.7, 0.2, 0.05, 0.05], # 1 is in the top 2
[0.2, 0.3, 0.4, 0.1], # 0 is not in the top 2
[0.4, 0.4, 0.1, 0.1], # 0 is in the top 2
[0.7, 0.05, 0.2, 0.05] # 2 is in the top 2
])
target = torch.tensor([ # targets as one-hot vectors
[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0]
])
state = engine.run([[preds, target]])
print(state.metrics['top_k_accuracy'])
.. testoutput::
0.75
"""
_state_dict_all_req_keys = ("_num_correct", "_num_examples")
def __init__(
self,
k: int = 5,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(TopKCategoricalAccuracy, self).__init__(output_transform, device=device)
self._k = k
@reinit__is_reduced
def reset(self) -> None:
self._num_correct = torch.tensor(0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
expanded_y = y.view(-1, 1).expand(-1, self._k)
correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
self._num_correct += torch.sum(correct).to(self._device)
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_correct", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError(
"TopKCategoricalAccuracy must have at least one example before it can be computed."
)
return self._num_correct.item() / self._num_examples
|
from typing import Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanAbsoluteError"]
class MeanAbsoluteError(Metric):
r"""Calculates `the mean absolute error <https://en.wikipedia.org/wiki/Mean_absolute_error>`_.
.. math:: \text{MAE} = \frac{1}{N} \sum_{i=1}^N \lvert y_{i} - x_{i} \rvert
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanAbsoluteError()
metric.attach(default_evaluator, 'mae')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mae'])
.. testoutput::
2.9375
"""
_state_dict_all_req_keys = ("_sum_of_absolute_errors", "_num_examples")
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_absolute_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
absolute_errors = torch.abs(y_pred - y.view_as(y_pred))
self._sum_of_absolute_errors += torch.sum(absolute_errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_absolute_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.")
return self._sum_of_absolute_errors.item() / self._num_examples
|
import warnings
from typing import Callable, Optional, Sequence, Union
import torch
import torch.nn.functional as F
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["SSIM"]
class SSIM(Metric):
"""
Computes Structural Similarity Index Measure
- ``update`` must receive output of the form ``(y_pred, y)``. They have to be of the same type.
Valid :class:`torch.dtype` are the following:
- on CPU: `torch.float32`, `torch.float64`.
- on CUDA: `torch.float16`, `torch.bfloat16`, `torch.float32`, `torch.float64`.
Args:
data_range: Range of the image. Typically, ``1.0`` or ``255``.
kernel_size: Size of the kernel. Default: (11, 11)
sigma: Standard deviation of the gaussian kernel.
Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
k1: Parameter of SSIM. Default: 0.01
k2: Parameter of SSIM. Default: 0.03
gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
output_transform: A callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = SSIM(data_range=1.0)
metric.attach(default_evaluator, 'ssim')
preds = torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['ssim'])
.. testoutput::
0.9218971...
.. versionadded:: 0.4.2
"""
_state_dict_all_req_keys = ("_sum_of_ssim", "_num_examples", "_kernel")
def __init__(
self,
data_range: Union[int, float],
kernel_size: Union[int, Sequence[int]] = (11, 11),
sigma: Union[float, Sequence[float]] = (1.5, 1.5),
k1: float = 0.01,
k2: float = 0.03,
gaussian: bool = True,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if isinstance(kernel_size, int):
self.kernel_size: Sequence[int] = [kernel_size, kernel_size]
elif isinstance(kernel_size, Sequence):
self.kernel_size = kernel_size
else:
raise ValueError("Argument kernel_size should be either int or a sequence of int.")
if isinstance(sigma, float):
self.sigma: Sequence[float] = [sigma, sigma]
elif isinstance(sigma, Sequence):
self.sigma = sigma
else:
raise ValueError("Argument sigma should be either float or a sequence of float.")
if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in self.sigma):
raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
super(SSIM, self).__init__(output_transform=output_transform, device=device)
self.gaussian = gaussian
self.data_range = data_range
self.c1 = (k1 * data_range) ** 2
self.c2 = (k2 * data_range) ** 2
self.pad_h = (self.kernel_size[0] - 1) // 2
self.pad_w = (self.kernel_size[1] - 1) // 2
self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
self._kernel: Optional[torch.Tensor] = None
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self._num_examples = 0
def _uniform(self, kernel_size: int) -> torch.Tensor:
kernel = torch.zeros(kernel_size)
start_uniform_index = max(kernel_size // 2 - 2, 0)
end_uniform_index = min(kernel_size // 2 + 3, kernel_size)
min_, max_ = -2.5, 2.5
kernel[start_uniform_index:end_uniform_index] = 1 / (max_ - min_)
return kernel.unsqueeze(dim=0) # (1, kernel_size)
def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
ksize_half = (kernel_size - 1) * 0.5
kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
if self.gaussian:
kernel_x = self._gaussian(kernel_size[0], sigma[0])
kernel_y = self._gaussian(kernel_size[1], sigma[1])
else:
kernel_x = self._uniform(kernel_size[0])
kernel_y = self._uniform(kernel_size[1])
return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.dtype != y.dtype:
raise TypeError(
f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
)
if y_pred.shape != y.shape:
raise ValueError(
f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
if len(y_pred.shape) != 4 or len(y.shape) != 4:
raise ValueError(
f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
# converts potential integer tensor to fp
if not y.is_floating_point():
y = y.float()
if not y_pred.is_floating_point():
y_pred = y_pred.float()
nb_channel = y_pred.size(1)
if self._kernel is None or self._kernel.shape[0] != nb_channel:
self._kernel = self._kernel_2d.expand(nb_channel, 1, -1, -1)
if y_pred.device != self._kernel.device:
if self._kernel.device == torch.device("cpu"):
self._kernel = self._kernel.to(device=y_pred.device)
elif y_pred.device == torch.device("cpu"):
warnings.warn(
"y_pred tensor is on cpu device but previous computation was on another device: "
f"{self._kernel.device}. To avoid having a performance hit, please ensure that all "
"y and y_pred tensors are on the same device.",
)
y_pred = y_pred.to(device=self._kernel.device)
y = y.to(device=self._kernel.device)
y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
if y_pred.dtype != self._kernel.dtype:
self._kernel = self._kernel.to(dtype=y_pred.dtype)
input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=nb_channel)
batch_size = y_pred.size(0)
output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]
mu_pred_sq = output_list[0].pow(2)
mu_target_sq = output_list[1].pow(2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq = output_list[2] - mu_pred_sq
sigma_target_sq = output_list[3] - mu_target_sq
sigma_pred_target = output_list[4] - mu_pred_target
a1 = 2 * mu_pred_target + self.c1
a2 = 2 * sigma_pred_target + self.c2
b1 = mu_pred_sq + mu_target_sq + self.c1
b2 = sigma_pred_sq + sigma_target_sq + self.c2
ssim_idx = (a1 * a2) / (b1 * b2)
self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(device=self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_ssim", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
return (self._sum_of_ssim / self._num_examples).item()
|
from typing import Sequence
import torch
from ignite.metrics.metric import reinit__is_reduced
from ignite.metrics.precision import _BasePrecisionRecall
__all__ = ["Recall"]
class Recall(_BasePrecisionRecall):
r"""Calculates recall for binary, multiclass and multilabel data.
.. math:: \text{Recall} = \frac{ TP }{ TP + FN }
where :math:`\text{TP}` is true positives and :math:`\text{FN}` is false negatives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
average: available options are
False
              default option. For multiclass and multilabel inputs, per class and per label
metric is returned respectively.
None
like `False` option except that per class metric is returned for binary data as well.
For compatibility with Scikit-Learn api.
'micro'
Metric is computed counting stats of classes/labels altogether.
.. math::
\text{Micro Recall} = \frac{\sum_{k=1}^C TP_k}{\sum_{k=1}^C TP_k+FN_k}
where :math:`C` is the number of classes/labels (2 in binary case). :math:`k` in
              :math:`TP_k` and :math:`FN_k` means that the measures are computed for class/label :math:`k` (in
a one-vs-rest sense in multiclass case).
              For binary and multiclass inputs, this is equivalent to accuracy,
so use :class:`~ignite.metrics.accuracy.Accuracy`.
'samples'
for multilabel input, at first, recall is computed on a
per sample basis and then average across samples is returned.
.. math::
\text{Sample-averaged Recall} = \frac{\sum_{n=1}^N \frac{TP_n}{TP_n+FN_n}}{N}
where :math:`N` is the number of samples. :math:`n` in :math:`TP_n` and :math:`FN_n`
means that the measures are computed for sample :math:`n`, across labels.
Incompatible with binary and multiclass inputs.
'weighted'
like macro recall but considers class/label imbalance. For binary and multiclass
input, it computes metric for each class then returns average of them weighted by
support of classes (number of actual samples in each class). For multilabel input,
it computes recall for each label then returns average of them weighted by support
of labels (number of actual positive samples in each label).
.. math::
Recall_k = \frac{TP_k}{TP_k+FN_k}
.. math::
\text{Weighted Recall} = \frac{\sum_{k=1}^C P_k * Recall_k}{N}
where :math:`C` is the number of classes (2 in binary case). :math:`P_k` is the number
of samples belonged to class :math:`k` in binary and multiclass case, and the number of
positive samples belonged to label :math:`k` in multilabel case.
Note that for binary and multiclass data, weighted recall is equivalent
              to accuracy, so use :class:`~ignite.metrics.accuracy.Accuracy`.
macro
computes macro recall which is unweighted average of metric computed across
classes or labels.
.. math::
\text{Macro Recall} = \frac{\sum_{k=1}^C Recall_k}{C}
where :math:`C` is the number of classes (2 in binary case).
True
like macro option. For backward compatibility.
is_multilabel: flag to use in multilabel case. By default, value is False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case. In binary and multilabel cases, the elements of
`y` and `y_pred` should have 0 or 1 values.
.. testcode:: 1
metric = Recall()
two_class_metric = Recall(average=None) # Returns recall for both classes
metric.attach(default_evaluator, "recall")
two_class_metric.attach(default_evaluator, "both classes recall")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Recall for class 0 and class 1: {state.metrics['both classes recall']}")
.. testoutput:: 1
Recall: 0.75
Recall for class 0 and class 1: tensor([0.5000, 0.7500], dtype=torch.float64)
Multiclass case
.. testcode:: 2
metric = Recall()
macro_metric = Recall(average=True)
metric.attach(default_evaluator, "recall")
macro_metric.attach(default_evaluator, "macro recall")
y_true = torch.tensor([2, 0, 2, 1, 0])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288]
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Macro Recall: {state.metrics['macro recall']}")
.. testoutput:: 2
Recall: tensor([0.5000, 0.0000, 0.5000], dtype=torch.float64)
Macro Recall: 0.3333333333333333
Multilabel case, the shapes must be (batch_size, num_categories, ...)
.. testcode:: 3
metric = Recall(is_multilabel=True)
micro_metric = Recall(is_multilabel=True, average='micro')
macro_metric = Recall(is_multilabel=True, average=True)
samples_metric = Recall(is_multilabel=True, average='samples')
metric.attach(default_evaluator, "recall")
micro_metric.attach(default_evaluator, "micro recall")
macro_metric.attach(default_evaluator, "macro recall")
samples_metric.attach(default_evaluator, "samples recall")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Micro Recall: {state.metrics['micro recall']}")
print(f"Macro Recall: {state.metrics['macro recall']}")
print(f"Samples Recall: {state.metrics['samples recall']}")
.. testoutput:: 3
Recall: tensor([1., 1., 0.], dtype=torch.float64)
Micro Recall: 0.5
Macro Recall: 0.6666666666666666
Samples Recall: 0.3
Thresholding of predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Recall(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "recall")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['recall'])
.. testoutput:: 4
0.75
.. versionchanged:: 0.4.10
Some new options were added to `average` parameter.
"""
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
self._check_type(output)
_, y, correct = self._prepare_output(output)
if self._average == "samples":
actual_positives = y.sum(dim=1)
true_positives = correct.sum(dim=1)
self._numerator += torch.sum(true_positives / (actual_positives + self.eps))
self._denominator += y.size(0)
elif self._average == "micro":
self._denominator += y.sum()
self._numerator += correct.sum()
else: # _average in [False, 'macro', 'weighted']
self._denominator += y.sum(dim=0)
self._numerator += correct.sum(dim=0)
if self._average == "weighted":
self._weight += y.sum(dim=0)
self._updated = True
|
from typing import Callable, Optional, Union
import torch
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.precision import Precision
from ignite.metrics.recall import Recall
__all__ = ["Fbeta"]
def Fbeta(
beta: float,
average: bool = True,
precision: Optional[Precision] = None,
recall: Optional[Recall] = None,
output_transform: Optional[Callable] = None,
device: Union[str, torch.device] = torch.device("cpu"),
) -> MetricsLambda:
r"""Calculates F-beta score.
.. math::
F_\beta = \left( 1 + \beta^2 \right) * \frac{ \text{precision} * \text{recall} }
{ \left( \beta^2 * \text{precision} \right) + \text{recall} }
where :math:`\beta` is a positive real factor.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
beta: weight of precision in harmonic mean
average: if True, F-beta score is computed as the unweighted average (across all classes
in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case.
precision: precision object metric with `average=False` to compute F-beta score
recall: recall object metric with `average=False` to compute F-beta score
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. It is used only if precision or recall are not provided.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Returns:
MetricsLambda, F-beta metric
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case
.. testcode:: 1
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 1
0.7499...
Multiclass case
.. testcode:: 2
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 2
0.5222...
F-beta can be computed for each class as done below:
.. testcode:: 3
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, average=False, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 3
tensor([0.5000, 0.6667, 0.4000], dtype=torch.float64)
The elements of `y` and `y_pred` should have 0 or 1 values. Thresholding of predictions can
be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
P = Precision(average=False, output_transform=thresholded_output_transform)
R = Recall(average=False, output_transform=thresholded_output_transform)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 4
0.7499...
"""
if not (beta > 0):
raise ValueError(f"Beta should be a positive integer, but given {beta}")
if precision is not None and output_transform is not None:
raise ValueError("If precision argument is provided, output_transform should be None")
if recall is not None and output_transform is not None:
raise ValueError("If recall argument is provided, output_transform should be None")
if precision is None:
precision = Precision(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif precision._average:
raise ValueError("Input precision metric should have average=False")
if recall is None:
recall = Recall(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif recall._average:
raise ValueError("Input recall metric should have average=False")
fbeta = (1.0 + beta**2) * precision * recall / (beta**2 * precision + recall + 1e-15)
if average:
fbeta = fbeta.mean().item()
return fbeta
|
import math
from typing import Union
import torch
from ignite.metrics.mean_squared_error import MeanSquaredError
__all__ = ["RootMeanSquaredError"]
class RootMeanSquaredError(MeanSquaredError):
r"""Calculates the `root mean squared error <https://en.wikipedia.org/wiki/Root-mean-square_deviation>`_.
.. math:: \text{RMSE} = \sqrt{ \frac{1}{N} \sum_{i=1}^N \|y_{i} - x_{i} \|^2 }
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = RootMeanSquaredError()
metric.attach(default_evaluator, 'rmse')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['rmse'])
.. testoutput::
1.956559480312316
"""
def compute(self) -> Union[torch.Tensor, float]:
mse = super(RootMeanSquaredError, self).compute()
return math.sqrt(mse)
|
from ignite.metrics.accumulation import Average, GeometricAverage, VariableAccumulation
from ignite.metrics.accuracy import Accuracy
from ignite.metrics.classification_report import ClassificationReport
from ignite.metrics.confusion_matrix import ConfusionMatrix, DiceCoefficient, IoU, JaccardIndex, mIoU
from ignite.metrics.epoch_metric import EpochMetric
from ignite.metrics.fbeta import Fbeta
from ignite.metrics.frequency import Frequency
from ignite.metrics.gan.fid import FID
from ignite.metrics.gan.inception_score import InceptionScore
from ignite.metrics.loss import Loss
from ignite.metrics.mean_absolute_error import MeanAbsoluteError
from ignite.metrics.mean_pairwise_distance import MeanPairwiseDistance
from ignite.metrics.mean_squared_error import MeanSquaredError
from ignite.metrics.metric import BatchFiltered, BatchWise, EpochWise, Metric, MetricUsage
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.multilabel_confusion_matrix import MultiLabelConfusionMatrix
from ignite.metrics.nlp.bleu import Bleu
from ignite.metrics.nlp.rouge import Rouge, RougeL, RougeN
from ignite.metrics.precision import Precision
from ignite.metrics.psnr import PSNR
from ignite.metrics.recall import Recall
from ignite.metrics.root_mean_squared_error import RootMeanSquaredError
from ignite.metrics.running_average import RunningAverage
from ignite.metrics.ssim import SSIM
from ignite.metrics.top_k_categorical_accuracy import TopKCategoricalAccuracy
__all__ = [
"Metric",
"Accuracy",
"Loss",
"MetricsLambda",
"MeanAbsoluteError",
"MeanPairwiseDistance",
"MeanSquaredError",
"ConfusionMatrix",
"ClassificationReport",
"TopKCategoricalAccuracy",
"Average",
"DiceCoefficient",
"EpochMetric",
"Fbeta",
"FID",
"GeometricAverage",
"IoU",
"InceptionScore",
"mIoU",
"JaccardIndex",
"MultiLabelConfusionMatrix",
"Precision",
"PSNR",
"Recall",
"RootMeanSquaredError",
"RunningAverage",
"VariableAccumulation",
"Frequency",
"SSIM",
"Bleu",
"Rouge",
"RougeN",
"RougeL",
]
|
import numbers
from typing import Callable, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["VariableAccumulation", "GeometricAverage", "Average"]
class VariableAccumulation(Metric):
"""Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
The class stores input into two public variables: `accumulator` and `num_examples`.
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
        - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
Args:
op: a callable to update accumulator. Method's signature is `(accumulator, output)`.
For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
"""
required_output_keys = None
_state_dict_all_req_keys = ("accumulator", "num_examples")
def __init__(
self,
op: Callable,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if not callable(op):
raise TypeError(f"Argument op should be a callable, but given {type(op)}")
self._op = op
super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self.num_examples = 0
def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:
if not isinstance(output, (numbers.Number, torch.Tensor)):
raise TypeError(f"Output should be a number or torch.Tensor, but given {type(output)}")
@reinit__is_reduced
def update(self, output: Union[float, torch.Tensor]) -> None:
self._check_output_type(output)
if isinstance(output, torch.Tensor):
output = output.detach()
if not (output.device == self._device and output.dtype == self.accumulator.dtype):
output = output.to(self.accumulator)
self.accumulator = self._op(self.accumulator, output)
if isinstance(output, torch.Tensor):
self.num_examples += output.shape[0] if len(output.shape) > 1 else 1
else:
self.num_examples += 1
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Tuple[torch.Tensor, int]:
return self.accumulator, self.num_examples
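# Hypothetical usage sketch (engine-free, the helper name is illustrative):
# ``VariableAccumulation`` can also be driven manually with a custom ``op``; here it
# keeps a running sum of per-batch losses together with the number of updates.
def _running_sum_example() -> None:
    acc = VariableAccumulation(op=lambda a, x: a + x)
    for batch_loss in (0.5, 0.25, 0.25):
        acc.update(batch_loss)
    total, n = acc.compute()
    # total is a float64 tensor holding 1.0 and n == 3, so the mean is ~0.333
    print(total.item() / n)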
class Average(VariableAccumulation):
"""Helper class to compute arithmetic average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`
        ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
            # Case 1. input is a number
data = torch.tensor([0, 1, 2, 3, 4])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
2.0
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
# Case 2. input is a 1D torch.Tensor
data = torch.tensor([
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]
])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
# Case 3. input is a ND torch.Tensor
data = [
torch.tensor([[0, 0, 0], [1, 1, 1]]),
torch.tensor([[2, 2, 2], [3, 3, 3]])
]
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:
if isinstance(x, torch.Tensor) and x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self.num_examples < 1:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
return self.accumulator / self.num_examples
class GeometricAverage(VariableAccumulation):
"""Helper class to compute geometric average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
        - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`
        ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
            # Case 1. input is a number
data = torch.tensor([1, 2, 3])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
1.8171...
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
# Case 2. input is a 1D torch.Tensor
data = torch.tensor([
[1, 1, 1],
[2, 2, 2],
[3, 3, 3],
[4, 4, 4],
])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
# Case 3. input is a ND torch.Tensor
data = [
torch.tensor([[1, 1, 1], [2, 2, 2]]),
torch.tensor([[3, 3, 3], [4, 4, 4]])
]
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
x = torch.log(x)
if x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self.num_examples < 1:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
tensor = torch.exp(self.accumulator / self.num_examples)
if tensor.numel() == 1:
return tensor.item()
return tensor
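# Hypothetical usage sketch (engine-free, the helper name is illustrative): updating
# ``Average`` and ``GeometricAverage`` manually instead of attaching them to
# ``default_evaluator`` as in the docstrings above.
def _average_comparison_example() -> None:
    arithmetic, geometric = Average(), GeometricAverage()
    for value in (1.0, 2.0, 4.0):
        arithmetic.update(value)
        geometric.update(value)
    print(arithmetic.compute())  # arithmetic mean: (1 + 2 + 4) / 3, a 0-d float64 tensor
    print(geometric.compute())   # geometric mean: (1 * 2 * 4) ** (1 / 3) == 2.0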
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping
from functools import wraps
from numbers import Number
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import torch
import ignite.distributed as idist
from ignite.base.mixins import Serializable
from ignite.engine import CallableEventWithFilter, Engine, Events
if TYPE_CHECKING:
from ignite.metrics.metrics_lambda import MetricsLambda
__all__ = [
"Metric",
"MetricUsage",
"EpochWise",
"BatchWise",
"BatchFiltered",
"RunningEpochWise",
"RunningBatchWise",
"SingleEpochRunningBatchWise",
]
class MetricUsage:
"""
Base class for all usages of metrics.
A usage of metric defines the events when a metric starts to compute, updates and completes.
Valid events are from :class:`~ignite.engine.events.Events`.
Args:
started: event when the metric starts to compute. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.started`.
completed: event when the metric completes. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.completed`.
iteration_completed: event when the metric updates. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.iteration_completed`.
"""
usage_name: str
def __init__(self, started: Events, completed: Events, iteration_completed: CallableEventWithFilter) -> None:
self.__started = started
self.__completed = completed
self.__iteration_completed = iteration_completed
@property
def STARTED(self) -> Events:
return self.__started
@property
def COMPLETED(self) -> Events:
return self.__completed
@property
def ITERATION_COMPLETED(self) -> CallableEventWithFilter:
return self.__iteration_completed
class EpochWise(MetricUsage):
"""
Epoch-wise usage of Metrics. It's the default and most common usage of metrics.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "epoch_wise"
def __init__(self) -> None:
super(EpochWise, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class RunningEpochWise(EpochWise):
"""
Running epoch-wise usage of Metrics. It's the running version of the :class:`~.metrics.metric.EpochWise` metric
usage. A metric with such a usage most likely accompanies an :class:`~.metrics.metric.EpochWise` one to compute
a running measure of it e.g. running average.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``EPOCH_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "running_epoch_wise"
def __init__(self) -> None:
super(EpochWise, self).__init__(
started=Events.STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.EPOCH_COMPLETED,
)
class BatchWise(MetricUsage):
"""
Batch-wise usage of Metrics.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``ITERATION_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.ITERATION_STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class RunningBatchWise(BatchWise):
"""
Running batch-wise usage of Metrics. It's the running version of the :class:`~.metrics.metric.BatchWise` metric
usage. A metric with such a usage could for example accompany a :class:`~.metrics.metric.BatchWise` one to compute
a running measure of it e.g. running average.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "running_batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class SingleEpochRunningBatchWise(BatchWise):
"""
Running batch-wise usage of Metrics in a single epoch. It's like :class:`~.metrics.metric.RunningBatchWise` metric
usage with the difference that it is used during a single epoch.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "single_epoch_running_batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class BatchFiltered(MetricUsage):
"""
Batch filtered usage of Metrics. This usage is similar to epoch-wise but the update event is filtered.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on filtered ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Args:
args: Positional arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
kwargs: Keyword arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
handled by :meth:`~ignite.metrics.metric.Metric.iteration_completed`.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(BatchFiltered, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED(*args, **kwargs),
)
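# --- Hedged usage sketch (illustration only, not part of the original module) ---
# Shows how the usage classes above wire a metric's reset/update/compute to
# engine events. `Accuracy`, the dummy eval_step and the toy data are
# assumptions made purely for this example.
if __name__ == "__main__":
    import torch

    from ignite.engine import Engine
    from ignite.metrics import Accuracy

    def eval_step(engine, batch):
        # The process function simply forwards (y_pred, y) pairs.
        return batch

    evaluator = Engine(eval_step)

    # Reset on EPOCH_STARTED, update every iteration, compute on EPOCH_COMPLETED.
    Accuracy().attach(evaluator, "acc_epoch", usage=EpochWise())
    # Reset, update and compute on every single iteration.
    Accuracy().attach(evaluator, "acc_batch", usage=BatchWise())
    # Epoch-wise lifecycle, but updates only on every 2nd iteration.
    Accuracy().attach(evaluator, "acc_filtered", usage=BatchFiltered(every=2))

    data = [(torch.tensor([0, 1, 1]), torch.tensor([0, 1, 0]))] * 4
    print(evaluator.run(data).metrics)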
class Metric(Serializable, metaclass=ABCMeta):
"""
Base class for all Metrics.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Attributes:
required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the
latter is a dictionary. By default, ``("y_pred", "y")``. This is useful with custom metrics that can require
other arguments than predictions ``y_pred`` and targets ``y``. See an example below.
Examples:
Let's implement a custom metric that requires ``y_pred``, ``y`` and ``x`` as input for ``update`` function.
In the example below we show how to set up a standard metric like Accuracy and the custom metric used by an
``evaluator`` created with :meth:`~ignite.engine.create_supervised_evaluator` method.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. code-block:: python
# https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/5
import torch
import torch.nn as nn
from ignite.metrics import Metric, Accuracy
from ignite.engine import create_supervised_evaluator
class CustomMetric(Metric):
required_output_keys = ("y_pred", "y", "x")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, x = output
# ...
def reset(self):
# ...
pass
def compute(self):
# ...
pass
model = ...
metrics = {
"Accuracy": Accuracy(),
"CustomMetric": CustomMetric()
}
evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
)
res = evaluator.run(data)
.. versionchanged:: 0.4.2
``required_output_keys`` became public attribute.
"""
# public class attribute
required_output_keys: Optional[Tuple] = ("y_pred", "y")
# for backward compatibility
_required_output_keys = required_output_keys
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
self._output_transform = output_transform
# Some metrics have a large performance regression when run on XLA devices, so for now, we disallow it.
if torch.device(device).type == "xla":
raise ValueError("Cannot create metric on an XLA device. Use device='cpu' instead.")
self._device = torch.device(device)
self.reset()
@abstractmethod
def reset(self) -> None:
"""
Resets the metric to its initial state.
By default, this is called at the start of each epoch.
"""
pass
@abstractmethod
def update(self, output: Any) -> None:
"""
Updates the metric's state using the passed batch output.
By default, this is called once for each batch.
Args:
output: the output from the engine's process function.
"""
pass
@abstractmethod
def compute(self) -> Any:
"""
Computes the metric based on its accumulated state.
By default, this is called at the end of each epoch.
Returns:
Any: | the actual quantity of interest. However, if a :class:`~collections.abc.Mapping` is returned,
it will be (shallow) flattened into `engine.state.metrics` when
:func:`~ignite.metrics.metric.Metric.completed` is called.
Raises:
NotComputableError: raised when the metric cannot be computed.
"""
pass
def started(self, engine: Engine) -> None:
"""Helper method to start data gathering for metric's computation. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`.
Args:
engine: the engine to which the metric must be attached
"""
self.reset()
@torch.no_grad()
def iteration_completed(self, engine: Engine) -> None:
"""Helper method to update metric's computation. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`.
Args:
engine: the engine to which the metric must be attached
Note:
``engine.state.output`` is used to compute metric values.
The majority of implemented metrics accept the following formats for ``engine.state.output``:
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. ``y_pred`` and ``y`` can be torch tensors or
list of tensors/numbers if applicable.
.. versionchanged:: 0.4.5
``y_pred`` and ``y`` can be torch tensors or list of tensors/numbers
"""
output = self._output_transform(engine.state.output)
if isinstance(output, Mapping):
if self.required_output_keys is None:
raise TypeError(
f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, "
f"but given {type(output)}"
)
if not all([k in output for k in self.required_output_keys]):
raise ValueError(
"When transformed engine's output is a mapping, "
f"it should contain {self.required_output_keys} keys, but given {list(output.keys())}"
)
output = tuple(output[k] for k in self.required_output_keys)
if isinstance(output, Sequence) and all([_is_list_of_tensors_or_numbers(o) for o in output]):
if not (len(output) == 2 and len(output[0]) == len(output[1])):
raise ValueError(
f"Output should have 2 items of the same length, "
f"got {len(output)} and {len(output[0])}, {len(output[1])}"
)
for o1, o2 in zip(output[0], output[1]):
# o1 and o2 are list of tensors or numbers
tensor_o1 = _to_batched_tensor(o1)
tensor_o2 = _to_batched_tensor(o2, device=tensor_o1.device)
self.update((tensor_o1, tensor_o2))
else:
self.update(output)
def completed(self, engine: Engine, name: str) -> None:
"""Helper method to compute metric's value and put into the engine. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`. If the metric's value is a torch tensor, it is
explicitly sent to CPU device.
Args:
engine: the engine to which the metric must be attached
name: the name of the metric used as key in dict `engine.state.metrics`
.. versionchanged:: 0.4.3
Added dict in metrics results.
.. versionchanged:: 0.4.5
metric's value is put on CPU if torch tensor.
"""
result = self.compute()
if isinstance(result, Mapping):
if name in result.keys():
raise ValueError(f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}")
for key, value in result.items():
engine.state.metrics[key] = value
engine.state.metrics[name] = result
else:
if isinstance(result, torch.Tensor):
if len(result.size()) == 0:
result = result.item()
elif "cpu" not in result.device.type:
result = result.cpu()
engine.state.metrics[name] = result
def _check_usage(self, usage: Union[str, MetricUsage]) -> MetricUsage:
if isinstance(usage, str):
usages = [EpochWise, RunningEpochWise, BatchWise, RunningBatchWise, SingleEpochRunningBatchWise]
for usage_cls in usages:
if usage == usage_cls.usage_name:
usage = usage_cls()
break
if not isinstance(usage, MetricUsage):
raise ValueError(
"Argument usage should be '(Running)EpochWise.usage_name' or "
f"'((SingleEpoch)Running)BatchWise.usage_name', got {usage}"
)
if not isinstance(usage, MetricUsage):
raise TypeError(f"Unhandled usage type {type(usage)}")
return usage
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = EpochWise()) -> None:
"""
Attaches current metric to provided engine. At the end of the engine's run, the `engine.state.metrics` dictionary will
contain computed metric's value under provided name.
Args:
engine: the engine to which the metric must be attached
name: the name of the metric to attach
usage: the usage of the metric. Valid string values should be
:attr:`ignite.metrics.metric.EpochWise.usage_name` (default) or
:attr:`ignite.metrics.metric.BatchWise.usage_name`.
Examples:
.. code-block:: python
metric = ...
metric.attach(engine, "mymetric")
assert "mymetric" in engine.run(data).metrics
assert metric.is_attached(engine)
Example with usage:
.. code-block:: python
metric = ...
metric.attach(engine, "mymetric", usage=BatchWise.usage_name)
assert "mymetric" in engine.run(data).metrics
assert metric.is_attached(engine, usage=BatchWise.usage_name)
"""
usage = self._check_usage(usage)
if not engine.has_event_handler(self.started, usage.STARTED):
engine.add_event_handler(usage.STARTED, self.started)
if not engine.has_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED):
engine.add_event_handler(usage.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(usage.COMPLETED, self.completed, name)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> None:
"""
Detaches current metric from the engine so that no metric computation is done during the run.
This method in conjunction with :meth:`~ignite.metrics.metric.Metric.attach` can be useful if several
metrics need to be computed with different periods. For example, one metric is computed every training epoch
and another metric (e.g. a more expensive one) is computed every n-th training epoch.
Args:
engine: the engine from which the metric must be detached
usage: the usage of the metric. Valid string values should be
'epoch_wise' (default) or 'batch_wise'.
Examples:
.. code-block:: python
metric = ...
engine = ...
metric.detach(engine)
assert "mymetric" not in engine.run(data).metrics
assert not metric.is_attached(engine)
Example with usage:
.. code-block:: python
metric = ...
engine = ...
metric.detach(engine, usage="batch_wise")
assert "mymetric" not in engine.run(data).metrics
assert not metric.is_attached(engine, usage="batch_wise")
"""
usage = self._check_usage(usage)
if engine.has_event_handler(self.completed, usage.COMPLETED):
engine.remove_event_handler(self.completed, usage.COMPLETED)
if engine.has_event_handler(self.started, usage.STARTED):
engine.remove_event_handler(self.started, usage.STARTED)
if engine.has_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED):
engine.remove_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED)
def is_attached(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> bool:
"""
Checks if current metric is attached to provided engine. If attached, metric's computed
value is written to `engine.state.metrics` dictionary.
Args:
engine: the engine to check for the metric's attachment
usage: the usage of the metric. Valid string values should be
'epoch_wise' (default) or 'batch_wise'.
"""
usage = self._check_usage(usage)
return engine.has_event_handler(self.completed, usage.COMPLETED)
def state_dict(self) -> OrderedDict:
"""Method returns state dict with attributes of the metric specified in its
`_state_dict_all_req_keys` attribute. Can be used to save internal state of the class.
If there's an active distributed configuration, some collective operations are performed and
the list of values across ranks is saved under each attribute's name in the dict.
"""
state = OrderedDict()
for attr_name in self._state_dict_all_req_keys:
if attr_name not in self.__dict__:
raise ValueError(
f"Found a value in _state_dict_all_req_keys that is not among metric attributes: {attr_name}"
)
attr = getattr(self, attr_name)
if not isinstance(attr, (int, float, torch.Tensor)):
raise TypeError(
"Currently, only numeric or tensor-typed attributes of the metric"
" could be added to its state_dict."
)
if idist.get_world_size() == 1:
state[attr_name] = [attr]
else:
if isinstance(attr, (int, float)):
attr_type = type(attr)
attr = float(attr)
gathered_attr = cast(List[Any], idist.all_gather(attr))
if isinstance(attr, float):
gathered_attr = [attr_type(process_attr) for process_attr in gathered_attr]
state[attr_name] = gathered_attr
return state
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replaces internal state of the class with provided state dict data.
If there's an active distributed configuration, the process uses its rank to pick the proper value from
the list of values saved under each attribute's name in the dict.
Args:
state_dict: a dict containing attributes of the metric specified in its `_state_dict_all_req_keys`
attribute.
"""
super().load_state_dict(state_dict)
rank = idist.get_rank()
for attr in self._state_dict_all_req_keys:
setattr(self, attr, state_dict[attr][rank])
def __add__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x + y, self, other)
def __radd__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x + y, other, self)
def __sub__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x - y, self, other)
def __rsub__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x - y, other, self)
def __mul__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x * y, self, other)
def __rmul__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x * y, other, self)
def __pow__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x**y, self, other)
def __rpow__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x**y, other, self)
def __mod__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x % y, self, other)
def __truediv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
def __rtruediv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)
def __floordiv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x // y, self, other)
def __getattr__(self, attr: str) -> Callable:
from ignite.metrics.metrics_lambda import MetricsLambda
def fn(x: Metric, *args: Any, **kwargs: Any) -> Any:
return getattr(x, attr)(*args, **kwargs)
def wrapper(*args: Any, **kwargs: Any) -> "MetricsLambda":
return MetricsLambda(fn, self, *args, **kwargs)
return wrapper
def __getitem__(self, index: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x: x[index], self)
def __getstate__(self) -> Dict:
return self.__dict__
def __setstate__(self, d: Dict) -> None:
self.__dict__.update(d)
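# --- Hedged sketch (illustration only, not part of the original module) ---
# The arithmetic dunders above let metrics be composed with plain Python
# operators; every operation returns a `MetricsLambda`. The F1 expression below
# is a common composition; `Precision`/`Recall` are assumed importable here.
if __name__ == "__main__":
    from ignite.metrics import Precision, Recall

    precision = Precision(average=False)
    recall = Recall(average=False)

    # Element-wise F1 per class, then averaged; the small constant avoids
    # division by zero for classes that were never predicted.
    f1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean()
    # `f1` behaves like any other metric, e.g. f1.attach(evaluator, "f1").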
def sync_all_reduce(*attrs: Any) -> Callable:
"""Helper decorator for distributed configuration to collect instance attribute value
across all participating processes and apply the specified reduction operation.
See :doc:`metrics` on how to use it.
Args:
attrs: attribute names of decorated class
.. versionchanged:: 0.4.5
- Ability to handle different reduction operations (SUM, MAX, MIN, PRODUCT).
"""
def wrapper(func: Callable) -> Callable:
@wraps(func)
def another_wrapper(self: Metric, *args: Any, **kwargs: Any) -> Callable:
if not isinstance(self, Metric):
raise RuntimeError(
"Decorator sync_all_reduce should be used on ignite.metric.Metric class methods only"
)
ws = idist.get_world_size()
unreduced_attrs = {}
if len(attrs) > 0 and ws > 1:
for attr in attrs:
op_kwargs = {}
if ":" in attr:
attr, op = attr.split(":")
valid_ops = ["MIN", "MAX", "SUM", "PRODUCT"]
if op not in valid_ops:
raise ValueError(f"Reduction operation is not valid (expected : {valid_ops}, got: {op}")
op_kwargs["op"] = op
if attr not in self.__dict__:
raise ValueError(f"Metric {type(self)} has no attribute named `{attr}`.")
t = getattr(self, attr)
if not isinstance(t, (Number, torch.Tensor)):
raise TypeError(
"Attribute provided to sync_all_reduce should be a "
f"number or tensor but `{attr}` has type {type(t)}"
)
unreduced_attrs[attr] = t
# Here `clone` is necessary since `idist.all_reduce` modifies `t` inplace in the case
# `t` is a tensor and its `device` is same as that of the process.
# TODO: Remove this dual behavior of `all_reduce` to always either return a new tensor or
# modify it in-place.
t_reduced = idist.all_reduce(cast(float, t) if isinstance(t, Number) else t.clone(), **op_kwargs)
setattr(self, attr, t_reduced)
result = func(self, *args, **kwargs)
for attr, value in unreduced_attrs.items():
setattr(self, attr, value)
return result
return another_wrapper
setattr(wrapper, "_decorated", True)
return wrapper
def reinit__is_reduced(func: Callable) -> Callable:
"""Helper decorator for distributed configuration.
See :doc:`metrics` on how to use it.
Args:
func: A callable to reinit.
"""
@wraps(func)
def wrapper(self: Metric, *args: Any, **kwargs: Any) -> None:
func(self, *args, **kwargs)
if "_result" in self.__dict__:
self._result = None # type: ignore[attr-defined]
setattr(wrapper, "_decorated", True)
return wrapper
def _is_list_of_tensors_or_numbers(x: Sequence[Union[torch.Tensor, float]]) -> bool:
return isinstance(x, Sequence) and all([isinstance(t, (torch.Tensor, Number)) for t in x])
def _to_batched_tensor(x: Union[torch.Tensor, float], device: Optional[torch.device] = None) -> torch.Tensor:
if isinstance(x, torch.Tensor):
return x.unsqueeze(dim=0)
return torch.tensor([x], device=device)
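# --- Hedged sketch (illustration only, not part of the original module) ---
# A minimal custom metric built on the helpers above: `reinit__is_reduced` on
# reset/update and `sync_all_reduce` with the "attr:OP" syntax on compute.
# `MaxAbsError` and its attribute names are assumptions made for this example.
if __name__ == "__main__":

    class MaxAbsError(Metric):
        _state_dict_all_req_keys = ("_max_error",)

        @reinit__is_reduced
        def reset(self) -> None:
            self._max_error = torch.tensor(0.0, device=self._device)

        @reinit__is_reduced
        def update(self, output: Any) -> None:
            y_pred, y = output[0].detach(), output[1].detach()
            batch_max = (y_pred - y).abs().max().to(self._device)
            self._max_error = torch.maximum(self._max_error, batch_max)

        # In a distributed run, `_max_error` is reduced with MAX before compute.
        @sync_all_reduce("_max_error:MAX")
        def compute(self) -> float:
            return self._max_error.item()

    m = MaxAbsError()
    m.update((torch.tensor([0.1, 0.9]), torch.tensor([0.0, 1.0])))
    m.update((torch.tensor([0.5]), torch.tensor([0.2])))
    print(m.compute())  # ~0.3, the largest absolute error seen so far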
|
import warnings
from typing import Callable, cast, Optional, Sequence, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.accuracy import _BaseClassification
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from ignite.utils import to_onehot
__all__ = ["Precision"]
class _BasePrecisionRecall(_BaseClassification):
_state_dict_all_req_keys = ("_numerator", "_denominator", "_weight")
def __init__(
self,
output_transform: Callable = lambda x: x,
average: Optional[Union[bool, str]] = False,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
if not (average is None or isinstance(average, bool) or average in ["macro", "micro", "weighted", "samples"]):
raise ValueError(
"Argument average should be None or a boolean or one of values"
" 'macro', 'micro', 'weighted' and 'samples'."
)
if average is True:
self._average: Optional[Union[bool, str]] = "macro"
else:
self._average = average
self.eps = 1e-20
self._updated = False
super(_BasePrecisionRecall, self).__init__(
output_transform=output_transform, is_multilabel=is_multilabel, device=device
)
def _check_type(self, output: Sequence[torch.Tensor]) -> None:
super()._check_type(output)
if self._type in ["binary", "multiclass"] and self._average == "samples":
raise ValueError("Argument average='samples' is incompatible with binary and multiclass input data.")
y_pred, y = output
if self._type == "multiclass" and y.dtype != torch.long:
warnings.warn("`y` should be of dtype long when entry type is multiclass", RuntimeWarning)
if (
self._type == "binary"
and self._average is not False
and (y.dtype != torch.long or y_pred.dtype != torch.long)
):
warnings.warn(
"`y` and `y_pred` should be of dtype long when entry type is binary and average!=False", RuntimeWarning
)
def _prepare_output(self, output: Sequence[torch.Tensor]) -> Sequence[torch.Tensor]:
y_pred, y = output[0].detach(), output[1].detach()
if self._type == "binary" or self._type == "multiclass":
num_classes = 2 if self._type == "binary" else y_pred.size(1)
if self._type == "multiclass" and y.max() + 1 > num_classes:
raise ValueError(
f"y_pred contains fewer classes than y. Number of classes in the prediction is {num_classes}"
f" and an element in y has invalid class = {y.max().item() + 1}."
)
y = y.view(-1)
if self._type == "binary" and self._average is False:
y_pred = y_pred.view(-1)
else:
y = to_onehot(y.long(), num_classes=num_classes)
indices = torch.argmax(y_pred, dim=1) if self._type == "multiclass" else y_pred.long()
y_pred = to_onehot(indices.view(-1), num_classes=num_classes)
elif self._type == "multilabel":
# if y, y_pred shape is (N, C, ...) -> (N * ..., C)
num_labels = y_pred.size(1)
y_pred = torch.transpose(y_pred, 1, -1).reshape(-1, num_labels)
y = torch.transpose(y, 1, -1).reshape(-1, num_labels)
# Convert from int cuda/cpu to double on self._device
y_pred = y_pred.to(dtype=torch.float64, device=self._device)
y = y.to(dtype=torch.float64, device=self._device)
correct = y * y_pred
return y_pred, y, correct
@reinit__is_reduced
def reset(self) -> None:
"""
`numerator`, `denominator` and `weight` are three variables chosen to be abstract
representatives of the ones that are measured for cases with different `average` parameters.
`weight` is only used when `average='weighted'`. Actual value of these three variables is
as follows.
average='samples':
numerator (torch.Tensor): sum of metric value for samples
denominator (int): number of samples
average='weighted':
numerator (torch.Tensor): number of true positives per class/label
denominator (torch.Tensor): number of predicted(for precision) or actual(for recall) positives per
class/label.
weight (torch.Tensor): number of actual positives per class
average='micro':
numerator (torch.Tensor): sum of number of true positives for classes/labels
denominator (torch.Tensor): sum of number of predicted(for precision) or actual(for recall) positives for
classes/labels.
average='macro' or boolean or None:
numerator (torch.Tensor): number of true positives per class/label
denominator (torch.Tensor): number of predicted(for precision) or actual(for recall) positives per
class/label.
"""
self._numerator: Union[int, torch.Tensor] = 0
self._denominator: Union[int, torch.Tensor] = 0
self._weight: Union[int, torch.Tensor] = 0
self._updated = False
super(_BasePrecisionRecall, self).reset()
@sync_all_reduce("_numerator", "_denominator")
def compute(self) -> Union[torch.Tensor, float]:
r"""
Return value of the metric for `average` options `'weighted'` and `'macro'` is computed as follows.
.. math::
\text{Precision/Recall} = \frac{ numerator }{ denominator } \cdot weight
wherein `weight` is the internal variable `_weight` for `'weighted'` option and :math:`1/C`
for the `macro` one. :math:`C` is the number of classes/labels.
Return value of the metric for `average` options `'micro'`, `'samples'`, `False` and None is as follows.
.. math::
\text{Precision/Recall} = \frac{ numerator }{ denominator }
"""
if not self._updated:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
fraction = self._numerator / (self._denominator + (self.eps if self._average != "samples" else 0))
if self._average == "weighted":
_weight = idist.all_reduce(self._weight.clone()) # type: ignore[union-attr]
sum_of_weights = cast(torch.Tensor, _weight).sum() + self.eps
return ((fraction @ _weight) / sum_of_weights).item() # type: ignore
elif self._average == "micro" or self._average == "samples":
return cast(torch.Tensor, fraction).item()
elif self._average == "macro":
return cast(torch.Tensor, fraction).mean().item()
else:
return fraction
class Precision(_BasePrecisionRecall):
r"""Calculates precision for binary, multiclass and multilabel data.
.. math:: \text{Precision} = \frac{ TP }{ TP + FP }
where :math:`\text{TP}` is true positives and :math:`\text{FP}` is false positives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
average: available options are
False
default option. For multiclass and multilabel inputs, per-class and per-label
metrics are returned respectively.
None
like `False` option except that per class metric is returned for binary data as well.
For compatibility with Scikit-Learn api.
'micro'
Metric is computed counting stats of classes/labels altogether.
.. math::
\text{Micro Precision} = \frac{\sum_{k=1}^C TP_k}{\sum_{k=1}^C TP_k+FP_k}
where :math:`C` is the number of classes/labels (2 in binary case). :math:`k` in :math:`TP_k`
and :math:`FP_k` means that the measures are computed for class/label :math:`k` (in a one-vs-rest
sense in multiclass case).
For binary and multiclass inputs, this is equivalent to accuracy,
so use :class:`~ignite.metrics.accuracy.Accuracy`.
'samples'
for multilabel input, precision is first computed on a
per-sample basis and then the average across samples is returned.
.. math::
\text{Sample-averaged Precision} = \frac{\sum_{n=1}^N \frac{TP_n}{TP_n+FP_n}}{N}
where :math:`N` is the number of samples. :math:`n` in :math:`TP_n` and :math:`FP_n`
means that the measures are computed for sample :math:`n`, across labels.
Incompatible with binary and multiclass inputs.
'weighted'
like macro precision but considers class/label imbalance. For binary and multiclass
input, it computes metric for each class then returns average of them weighted by
support of classes (number of actual samples in each class). For multilabel input,
it computes precision for each label then returns average of them weighted by support
of labels (number of actual positive samples in each label).
.. math::
Precision_k = \frac{TP_k}{TP_k+FP_k}
.. math::
\text{Weighted Precision} = \frac{\sum_{k=1}^C P_k * Precision_k}{N}
where :math:`C` is the number of classes (2 in binary case). :math:`P_k` is the number
of samples belonging to class :math:`k` in the binary and multiclass case, and the number of
positive samples belonging to label :math:`k` in the multilabel case.
macro
computes macro precision which is unweighted average of metric computed across
classes/labels.
.. math::
\text{Macro Precision} = \frac{\sum_{k=1}^C Precision_k}{C}
where :math:`C` is the number of classes (2 in binary case).
True
like macro option. For backward compatibility.
is_multilabel: flag to use in multilabel case. By default, value is False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case. In binary and multilabel cases, the elements of
`y` and `y_pred` should have 0 or 1 values.
.. testcode:: 1
metric = Precision()
weighted_metric = Precision(average='weighted')
two_class_metric = Precision(average=None) # Returns precision for both classes
metric.attach(default_evaluator, "precision")
weighted_metric.attach(default_evaluator, "weighted precision")
two_class_metric.attach(default_evaluator, "both classes precision")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
print(f"Precision for class 0 and class 1: {state.metrics['both classes precision']}")
.. testoutput:: 1
Precision: 0.75
Weighted Precision: 0.6666666666666666
Precision for class 0 and class 1: tensor([0.5000, 0.7500], dtype=torch.float64)
Multiclass case
.. testcode:: 2
metric = Precision()
macro_metric = Precision(average=True)
weighted_metric = Precision(average='weighted')
metric.attach(default_evaluator, "precision")
macro_metric.attach(default_evaluator, "macro precision")
weighted_metric.attach(default_evaluator, "weighted precision")
y_true = torch.tensor([2, 0, 2, 1, 0])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288]
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Macro Precision: {state.metrics['macro precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
.. testoutput:: 2
Precision: tensor([0.5000, 0.0000, 0.3333], dtype=torch.float64)
Macro Precision: 0.27777777777777773
Weighted Precision: 0.3333333333333333
Multilabel case, the shapes must be (batch_size, num_labels, ...)
.. testcode:: 3
metric = Precision(is_multilabel=True)
micro_metric = Precision(is_multilabel=True, average='micro')
macro_metric = Precision(is_multilabel=True, average=True)
weighted_metric = Precision(is_multilabel=True, average='weighted')
samples_metric = Precision(is_multilabel=True, average='samples')
metric.attach(default_evaluator, "precision")
micro_metric.attach(default_evaluator, "micro precision")
macro_metric.attach(default_evaluator, "macro precision")
weighted_metric.attach(default_evaluator, "weighted precision")
samples_metric.attach(default_evaluator, "samples precision")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Micro Precision: {state.metrics['micro precision']}")
print(f"Macro Precision: {state.metrics['macro precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
print(f"Samples Precision: {state.metrics['samples precision']}")
.. testoutput:: 3
Precision: tensor([0.2000, 0.5000, 0.0000], dtype=torch.float64)
Micro Precision: 0.2222222222222222
Macro Precision: 0.2333333333333333
Weighted Precision: 0.175
Samples Precision: 0.2
Thresholding of predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Precision(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "precision")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["precision"])
.. testoutput:: 4
0.75
.. versionchanged:: 0.4.10
Some new options were added to `average` parameter.
"""
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
r"""
Update the metric state using prediction and target.
Args:
output: a binary tuple of tensors (y_pred, y) whose shapes follow the table below. N stands for the batch
dimension, `...` for possible additional dimensions and C for class dimension.
.. list-table::
:widths: 20 10 10 10
:header-rows: 1
* - Output member\\Data type
- Binary
- Multiclass
- Multilabel
* - y_pred
- (N, ...)
- (N, C, ...)
- (N, C, ...)
* - y
- (N, ...)
- (N, ...)
- (N, C, ...)
For binary and multilabel data, both y and y_pred should consist of 0's and 1's, but for multiclass
data, y_pred and y should consist of probabilities and integers respectively.
"""
self._check_shape(output)
self._check_type(output)
y_pred, y, correct = self._prepare_output(output)
if self._average == "samples":
all_positives = y_pred.sum(dim=1)
true_positives = correct.sum(dim=1)
self._numerator += torch.sum(true_positives / (all_positives + self.eps))
self._denominator += y.size(0)
elif self._average == "micro":
self._denominator += y_pred.sum()
self._numerator += correct.sum()
else: # _average in [False, None, 'macro', 'weighted']
self._denominator += y_pred.sum(dim=0)
self._numerator += correct.sum(dim=0)
if self._average == "weighted":
self._weight += y.sum(dim=0)
self._updated = True
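# --- Hedged sketch (illustration only, not part of the original module) ---
# Precision can also be driven by hand, without an Engine, by calling
# update/compute directly. The tensors below are arbitrary example data.
if __name__ == "__main__":
    y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
    y_true = torch.tensor([1, 0, 1, 1, 0, 1])

    p = Precision()  # binary input, average=False -> single value
    p.update((y_pred, y_true))
    print(float(p.compute()))  # 0.75: 3 true positives out of 4 predicted positives

    p_macro = Precision(average="macro")
    p_macro.update((y_pred, y_true))
    print(p_macro.compute())  # 0.625: unweighted mean of per-class precisions (0.5 and 0.75)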
|
import warnings
from typing import Callable, cast, List, Optional, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced
__all__ = ["EpochMetric"]
class EpochMetric(Metric):
"""Class for metrics that should be computed on the entire output history of a model.
Model's output and targets are restricted to be of shape ``(batch_size, n_targets)``. Output
datatype should be `float32`. Target datatype should be `long` for classification and `float` for regression.
.. warning::
Current implementation stores all input data (output and target) as tensors before computing a metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
In distributed configuration, all stored data (output and target) is mutually collected across all processes
using all gather collective operation. This can potentially lead to a memory error.
Compute method executes ``compute_fn`` on zero rank process only and final result is broadcasted to
all processes.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
compute_fn: a callable which receives two tensors as the `predictions` and `targets`
and returns a scalar. Input tensors will be on specified ``device`` (see arg below).
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no
issues. If an issue is found, the user is warned that there might be a problem with ``compute_fn``.
Default, True.
device: optional device specification for internal storage.
Example:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
def mse_fn(y_preds, y_targets):
return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()
metric = EpochMetric(mse_fn)
metric.attach(default_evaluator, "mse")
y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["mse"])
.. testoutput::
0.5729...
Warnings:
EpochMetricWarning: the user is warned when ``compute_fn`` raises an issue on the first batch of data processed.
To disable the warning, set ``check_compute_fn=False``.
"""
_state_dict_all_req_keys = ("_predictions", "_targets")
def __init__(
self,
compute_fn: Callable[[torch.Tensor, torch.Tensor], float],
output_transform: Callable = lambda x: x,
check_compute_fn: bool = True,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if not callable(compute_fn):
raise TypeError("Argument compute_fn should be callable.")
self.compute_fn = compute_fn
self._check_compute_fn = check_compute_fn
super(EpochMetric, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._predictions: List[torch.Tensor] = []
self._targets: List[torch.Tensor] = []
self._result: Optional[float] = None
def _check_shape(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if y_pred.ndimension() not in (1, 2):
raise ValueError("Predictions should be of shape (batch_size, n_targets) or (batch_size, ).")
if y.ndimension() not in (1, 2):
raise ValueError("Targets should be of shape (batch_size, n_targets) or (batch_size, ).")
def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if len(self._predictions) < 1:
return
dtype_preds = self._predictions[-1].dtype
if dtype_preds != y_pred.dtype:
raise ValueError(
f"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}"
)
dtype_targets = self._targets[-1].dtype
if dtype_targets != y.dtype:
raise ValueError(f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}")
@reinit__is_reduced
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
self._check_shape(output)
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
y_pred = y_pred.squeeze(dim=-1)
if y.ndimension() == 2 and y.shape[1] == 1:
y = y.squeeze(dim=-1)
y_pred = y_pred.clone().to(self._device)
y = y.clone().to(self._device)
self._check_type((y_pred, y))
self._predictions.append(y_pred)
self._targets.append(y)
# Check once the signature and execution of compute_fn
if len(self._predictions) == 1 and self._check_compute_fn:
try:
self.compute_fn(self._predictions[0], self._targets[0])
except Exception as e:
warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning)
def compute(self) -> float:
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
if self._result is None:
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
self._result = 0.0
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
self._result = self.compute_fn(_prediction_tensor, _target_tensor)
if ws > 1:
# broadcast result to all processes
self._result = cast(float, idist.broadcast(self._result, src=0))
return self._result
class EpochMetricWarning(UserWarning):
pass
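# --- Hedged sketch (illustration only, not part of the original module) ---
# EpochMetric accumulates every (y_pred, y) pair and applies `compute_fn` to the
# full history only in compute(). The mean-absolute-error function and the toy
# tensors below are assumptions made for this example.
if __name__ == "__main__":

    def mae_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
        return torch.mean((y_preds - y_targets.type_as(y_preds)).abs()).item()

    metric = EpochMetric(mae_fn)
    metric.update((torch.tensor([0.0, 1.0]), torch.tensor([0, 1])))
    metric.update((torch.tensor([0.5, 2.0]), torch.tensor([1, 1])))
    # Computed over all four accumulated examples, not just the last batch.
    print(metric.compute())  # (0 + 0 + 0.5 + 1) / 4 = 0.375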
|
from typing import Callable, cast, Dict, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["Loss"]
class Loss(Metric):
"""
Calculates the average loss according to the passed loss_fn.
Args:
loss_fn: a callable taking a prediction tensor, a target
tensor, optionally other arguments, and returns the average loss
over all observations in the batch.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric.
This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
The output is expected to be a tuple `(prediction, target)` or
`(prediction, target, kwargs)` where kwargs is a dictionary of extra
keyword arguments. If extra keyword arguments are provided, they are passed to `loss_fn`.
batch_size: a callable taking a target tensor that returns the
first dimension size (usually the batch size).
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Attributes:
required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the
latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.
See an example below.
Examples:
Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs`` as input
for ``criterion`` function. In the example below we show how to setup standard metric like Accuracy
and the Loss metric using an ``evaluator`` created with
:meth:`~ignite.engine.create_supervised_evaluator` method.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
model = default_model
criterion = nn.NLLLoss()
metric = Loss(criterion)
metric.attach(default_evaluator, 'loss')
y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]])
y_true = torch.tensor([2, 2]).long()
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['loss'])
.. testoutput::
-0.3499999...
"""
required_output_keys = ("y_pred", "y", "criterion_kwargs")
_state_dict_all_req_keys = ("_sum", "_num_examples")
def __init__(
self,
loss_fn: Callable,
output_transform: Callable = lambda x: x,
batch_size: Callable = len,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(Loss, self).__init__(output_transform, device=device)
self._loss_fn = loss_fn
self._batch_size = batch_size
@reinit__is_reduced
def reset(self) -> None:
self._sum = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:
if len(output) == 2:
y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)
kwargs: Dict = {}
else:
y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
average_loss = self._loss_fn(y_pred, y, **kwargs).detach()
if len(average_loss.shape) != 0:
raise ValueError("loss_fn did not return the average loss.")
n = self._batch_size(y)
self._sum += average_loss.to(self._device) * n
self._num_examples += n
@sync_all_reduce("_sum", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("Loss must have at least one example before it can be computed.")
return self._sum.item() / self._num_examples
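# --- Hedged sketch (illustration only, not part of the original module) ---
# When the transformed output is a 3-tuple (y_pred, y, kwargs), the kwargs dict
# is forwarded to `loss_fn`. Here F.nll_loss and its `weight` argument are used
# purely as an illustration; all tensors are arbitrary example data.
if __name__ == "__main__":
    import torch.nn.functional as F

    metric = Loss(F.nll_loss)
    y_pred = torch.tensor([[-0.1, -0.4, -0.5], [-0.1, -0.7, -0.2]])
    y_true = torch.tensor([2, 2])
    class_weights = torch.tensor([1.0, 1.0, 2.0])

    # Extra keyword arguments travel with each update and reach the criterion.
    metric.update((y_pred, y_true, {"weight": class_weights}))
    print(metric.compute())  # weighted-average NLL over the two samples (~0.35 here)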
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["PSNR"]
class PSNR(Metric):
r"""Computes average
`Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
\text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
where :math:`\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` and `y` **must** have (batch_size, ...) shape.
- `y_pred` and `y` **must** have same dtype and same shape.
Args:
data_range: The data range of the target image (distance between minimum
and maximum possible values).
For other data types, please set the data range, otherwise an exception will be raised.
output_transform: A callable that is used to transform the Engine’s
process_function’s output into the form expected by the metric.
device: specifies which device updates are accumulated on.
Setting the metric’s device to be the same as your update arguments ensures
the update method is non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`,
visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
psnr = PSNR(data_range=1.0)
psnr.attach(default_evaluator, 'psnr')
preds = torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['psnr'])
.. testoutput::
16.8671405...
This metric by default accepts grayscale or RGB images. But if you have YCbCr or YUV images, only
the Y channel is needed to compute PSNR, and this can be done with ``output_transform``. For instance,
.. testcode::
def get_y_channel(output):
y_pred, y = output
# y_pred and y are (B, 3, H, W) and YCbCr or YUV images
# let's select y channel
return y_pred[:, 0, ...], y[:, 0, ...]
psnr = PSNR(data_range=219, output_transform=get_y_channel)
psnr.attach(default_evaluator, 'psnr')
preds = 219 * torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['psnr'])
.. testoutput::
16.7027966...
.. versionadded:: 0.4.3
"""
_state_dict_all_req_keys = ("_sum_of_batchwise_psnr", "_num_examples")
def __init__(
self,
data_range: Union[int, float],
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super().__init__(output_transform=output_transform, device=device)
self.data_range = data_range
def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if y_pred.dtype != y.dtype:
raise TypeError(
f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
)
if y_pred.shape != y.shape:
raise ValueError(
f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape_dtype(output)
y_pred, y = output[0].detach(), output[1].detach()
dim = tuple(range(1, y.ndim))
mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)
self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range**2 / (mse_error + 1e-10))).to(
device=self._device
)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_batchwise_psnr", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("PSNR must have at least one example before it can be computed.")
return (self._sum_of_batchwise_psnr / self._num_examples).item()
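# --- Hedged sketch (illustration only, not part of the original module) ---
# A direct check that the class above matches the definition
# PSNR = 10 * log10(data_range**2 / MSE), averaged over the batch.
# All tensors are arbitrary example data.
if __name__ == "__main__":
    preds = torch.rand(2, 3, 8, 8)
    target = preds * 0.9

    psnr = PSNR(data_range=1.0)
    psnr.update((preds, target))

    mse = torch.mean((preds.double() - target.double()) ** 2, dim=(1, 2, 3))
    expected = torch.mean(10.0 * torch.log10(1.0 / (mse + 1e-10))).item()
    print(psnr.compute(), expected)  # the two values agree closely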
|
import warnings
from typing import Any, Callable, cast, Optional, Union
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics.metric import Metric, MetricUsage, reinit__is_reduced, RunningBatchWise, SingleEpochRunningBatchWise
__all__ = ["RunningAverage"]
class RunningAverage(Metric):
"""Compute running average of a metric or the output of process function.
Args:
src: input source: an instance of :class:`~ignite.metrics.metric.Metric` or None. The latter
corresponds to `engine.state.output` which holds the output of process function.
alpha: running average decay factor, default 0.98
output_transform: a function to use to transform the output if `src` is None and
corresponds to the output of the process function. Otherwise it should be None.
epoch_bound: whether the running average should be reset after each epoch. It is deprecated in favor of
``usage`` argument in :meth:`attach` method. Setting ``epoch_bound`` to ``False`` is equivalent to
``usage=SingleEpochRunningBatchWise()`` and setting it to ``True`` is equivalent to
``usage=RunningBatchWise()`` in the :meth:`attach` method. Default None.
device: specifies which device updates are accumulated on. Should be
None when ``src`` is an instance of :class:`~ignite.metrics.metric.Metric`, as the running average will
use the ``src``'s device. Otherwise, defaults to CPU. Only applicable when the computed value
from the metric is a tensor.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
accuracy = Accuracy()
metric = RunningAverage(accuracy)
metric.attach(default_trainer, 'running_avg_accuracy')
@default_trainer.on(Events.ITERATION_COMPLETED)
def log_running_avg_metrics():
print(default_trainer.state.metrics['running_avg_accuracy'])
y_true = [torch.tensor(y) for y in [[0], [1], [0], [1], [0], [1]]]
y_pred = [torch.tensor(y) for y in [[0], [0], [0], [1], [1], [1]]]
state = default_trainer.run(zip(y_pred, y_true))
.. testoutput:: 1
1.0
0.98
0.98039...
0.98079...
0.96117...
0.96195...
.. testcode:: 2
default_trainer = get_default_trainer()
metric = RunningAverage(output_transform=lambda x: x.item())
metric.attach(default_trainer, 'running_avg_accuracy')
@default_trainer.on(Events.ITERATION_COMPLETED)
def log_running_avg_metrics():
print(default_trainer.state.metrics['running_avg_accuracy'])
y = [torch.tensor(y) for y in [[0], [1], [0], [1], [0], [1]]]
state = default_trainer.run(y)
.. testoutput:: 2
0.0
0.020000...
0.019600...
0.039208...
0.038423...
0.057655...
"""
required_output_keys = None
# TODO Shall we put `src` here? Then we should add a new branch for metric-typed attributes in `state_dict`
# and `load_state_dict`. Examples: this class; `Rouge`, which has a `List[_BaseRouge]`.
_state_dict_all_req_keys = ("_value",)
def __init__(
self,
src: Optional[Metric] = None,
alpha: float = 0.98,
output_transform: Optional[Callable] = None,
epoch_bound: Optional[bool] = None,
device: Optional[Union[str, torch.device]] = None,
):
if not (isinstance(src, Metric) or src is None):
raise TypeError("Argument src should be a Metric or None.")
if not (0.0 < alpha <= 1.0):
raise ValueError("Argument alpha should be a float between 0.0 and 1.0.")
if isinstance(src, Metric):
if output_transform is not None:
raise ValueError("Argument output_transform should be None if src is a Metric.")
def output_transform(x: Any) -> Any:
return x
if device is not None:
raise ValueError("Argument device should be None if src is a Metric.")
self.src: Union[Metric, None] = src
device = src._device
else:
if output_transform is None:
raise ValueError(
"Argument output_transform should not be None if src corresponds "
"to the output of process function."
)
self.src = None
if device is None:
device = torch.device("cpu")
if epoch_bound is not None:
warnings.warn(
"`epoch_bound` is deprecated and will be removed in the future. Consider using `usage` argument of"
"`attach` method instead. `epoch_bound=True` is equivalent with `usage=SingleEpochRunningBatchWise()`"
" and `epoch_bound=False` is equivalent with `usage=RunningBatchWise()`."
)
self.epoch_bound = epoch_bound
self.alpha = alpha
super(RunningAverage, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._value: Optional[Union[float, torch.Tensor]] = None
if isinstance(self.src, Metric):
self.src.reset()
@reinit__is_reduced
def update(self, output: Union[torch.Tensor, float]) -> None:
if self.src is None:
output = output.detach().to(self._device, copy=True) if isinstance(output, torch.Tensor) else output
value = idist.all_reduce(output) / idist.get_world_size()
else:
value = self.src.compute()
self.src.reset()
if self._value is None:
self._value = value
else:
self._value = self._value * self.alpha + (1.0 - self.alpha) * value
def compute(self) -> Union[torch.Tensor, float]:
return cast(Union[torch.Tensor, float], self._value)
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = RunningBatchWise()) -> None:
r"""
Attach the metric to the ``engine`` using the events determined by the ``usage``.
Args:
engine: the engine to get attached to.
name: by which, the metric is inserted into ``engine.state.metrics`` dictionary.
usage: the usage determining on which events the metric is reset, updated and computed. It should be an
instance of the :class:`~ignite.metrics.metric.MetricUsage`\ s in the following table.
======================================================= ===========================================
``usage`` **class** **Description**
======================================================= ===========================================
:class:`~.metrics.metric.RunningBatchWise` Running average of the ``src`` metric or
``engine.state.output`` is computed across
batches. In the former case, on each batch,
``src`` is reset, updated and computed then
its value is retrieved. Default.
:class:`~.metrics.metric.SingleEpochRunningBatchWise` Same as above but the running average is
computed across batches in an epoch so it
is reset at the end of the epoch.
:class:`~.metrics.metric.RunningEpochWise` Running average of the ``src`` metric or
``engine.state.output`` is computed across
epochs. In the former case, ``src`` works
as if it was attached in a
:class:`~ignite.metrics.metric.EpochWise`
manner and its computed value is retrieved
at the end of the epoch. The latter case
doesn't make much sense for this usage as
the ``engine.state.output`` of the last
batch is retrieved then.
======================================================= ===========================================
``RunningAverage`` retrieves ``engine.state.output`` at ``usage.ITERATION_COMPLETED`` if the ``src`` is not
given and it's computed and updated using ``src``, by manually calling its ``compute`` method, or
``engine.state.output`` at ``usage.COMPLETED`` event.
Also if ``src`` is given, it is updated at ``usage.ITERATION_COMPLETED``, but its reset event is determined by
``usage`` type. If ``isinstance(usage, BatchWise)`` holds true, ``src`` is reset on ``BatchWise().STARTED``,
otherwise on ``EpochWise().STARTED`` if ``isinstance(usage, EpochWise)``.
.. versionchanged:: 0.5.1
Added `usage` argument
"""
usage = self._check_usage(usage)
if self.epoch_bound is not None:
usage = SingleEpochRunningBatchWise() if self.epoch_bound else RunningBatchWise()
if isinstance(self.src, Metric) and not engine.has_event_handler(
self.src.iteration_completed, Events.ITERATION_COMPLETED
):
engine.add_event_handler(Events.ITERATION_COMPLETED, self.src.iteration_completed)
super().attach(engine, name, usage)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = RunningBatchWise()) -> None:
usage = self._check_usage(usage)
if self.epoch_bound is not None:
usage = SingleEpochRunningBatchWise() if self.epoch_bound else RunningBatchWise()
if isinstance(self.src, Metric) and engine.has_event_handler(
self.src.iteration_completed, Events.ITERATION_COMPLETED
):
engine.remove_event_handler(self.src.iteration_completed, Events.ITERATION_COMPLETED)
super().detach(engine, usage)
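# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): attaching a
# RunningAverage of the per-iteration output to a trainer. The train step,
# metric name and toy "losses" below are made-up placeholders.
if __name__ == "__main__":
    from ignite.engine import Engine
    from ignite.metrics import RunningAverage
    from ignite.metrics.metric import SingleEpochRunningBatchWise

    def train_step(engine, batch):
        # A real step would run forward/backward; here each batch is the "loss".
        return float(batch)

    trainer = Engine(train_step)

    # Exponential moving average (alpha=0.98) of engine.state.output, reset at
    # the start of every epoch via SingleEpochRunningBatchWise.
    RunningAverage(output_transform=lambda loss: loss, alpha=0.98).attach(
        trainer, "running_loss", usage=SingleEpochRunningBatchWise()
    )

    trainer.run([0.9, 0.8, 0.7], max_epochs=2)
    print(trainer.state.metrics["running_loss"])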
|
import itertools
from typing import Any, Callable, Optional, Union
import torch
from ignite.engine import Engine
from ignite.metrics.metric import EpochWise, Metric, MetricUsage, reinit__is_reduced
__all__ = ["MetricsLambda"]
class MetricsLambda(Metric):
"""
Apply a function to other metrics to obtain a new metric.
The result of the new metric is defined to be the result
of applying the function to the result of argument metrics.
    When updated, this metric recursively updates the metrics
    it depends on. When reset, all its dependency metrics are
    reset as well. When attached, all its dependency metrics are attached
    automatically (but partially, e.g. :meth:`~ignite.metrics.metric.Metric.is_attached()` will return False).
Args:
f: the function that defines the computation
        args: Sequence of other metrics or other values
            that will be fed to ``f`` as positional arguments.
        kwargs: Mapping of other metrics or other values
            that will be fed to ``f`` as keyword arguments.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F2 = MetricsLambda(Fbeta, recall, precision, 2)
F3 = MetricsLambda(Fbeta, recall, precision, 3)
F4 = MetricsLambda(Fbeta, recall, precision, 4)
F1.attach(default_evaluator, "F1")
F2.attach(default_evaluator, "F2")
F3.attach(default_evaluator, "F3")
F4.attach(default_evaluator, "F4")
y_true = torch.tensor([1, 0, 1, 0, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["F1"])
print(state.metrics["F2"])
print(state.metrics["F3"])
print(state.metrics["F4"])
.. testoutput::
0.8571...
0.9375...
0.9677...
0.9807...
    When checking whether the metric is attached, the metric is considered
    detached if any of its dependency metrics is detached.
.. code-block:: python
engine = ...
precision = Precision(average=False)
aP = precision.mean()
aP.attach(engine, "aP")
assert aP.is_attached(engine)
# partially attached
assert not precision.is_attached(engine)
precision.detach(engine)
assert not aP.is_attached(engine)
        # fully detached
assert not precision.is_attached(engine)
"""
def __init__(self, f: Callable, *args: Any, **kwargs: Any) -> None:
self.function = f
self.args = args
self.kwargs = kwargs
self.engine: Optional[Engine] = None
self._updated = False
super(MetricsLambda, self).__init__(device="cpu")
@reinit__is_reduced
def reset(self) -> None:
for i in itertools.chain(self.args, self.kwargs.values()):
if isinstance(i, Metric):
i.reset()
self._updated = False
@reinit__is_reduced
def update(self, output: Any) -> None:
if self.engine:
raise ValueError(
"MetricsLambda is already attached to an engine, "
"and MetricsLambda can't use update API while it's attached."
)
for i in itertools.chain(self.args, self.kwargs.values()):
if isinstance(i, Metric):
i.update(output)
self._updated = True
def compute(self) -> Any:
materialized = [_get_value_on_cpu(i) for i in self.args]
materialized_kwargs = {k: _get_value_on_cpu(v) for k, v in self.kwargs.items()}
return self.function(*materialized, **materialized_kwargs)
def _internal_attach(self, engine: Engine, usage: MetricUsage) -> None:
self.engine = engine
for index, metric in enumerate(itertools.chain(self.args, self.kwargs.values())):
if isinstance(metric, MetricsLambda):
metric._internal_attach(engine, usage)
elif isinstance(metric, Metric):
                # NB: the dependency metric is attached partially
                # We must not use is_attached() but rather check whether these event handlers exist
if not engine.has_event_handler(metric.started, usage.STARTED):
engine.add_event_handler(usage.STARTED, metric.started)
if not engine.has_event_handler(metric.iteration_completed, usage.ITERATION_COMPLETED):
engine.add_event_handler(usage.ITERATION_COMPLETED, metric.iteration_completed)
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = EpochWise()) -> None:
if self._updated:
raise ValueError(
"The underlying metrics are already updated, can't attach while using reset/update/compute API."
)
usage = self._check_usage(usage)
# recursively attach all its dependencies (partially)
self._internal_attach(engine, usage)
# attach only handler on EPOCH_COMPLETED
engine.add_event_handler(usage.COMPLETED, self.completed, name)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> None:
usage = self._check_usage(usage)
# remove from engine
super(MetricsLambda, self).detach(engine, usage)
self.engine = None
def is_attached(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> bool:
usage = self._check_usage(usage)
# check recursively the dependencies
return super(MetricsLambda, self).is_attached(engine, usage) and self._internal_is_attached(engine, usage)
def _internal_is_attached(self, engine: Engine, usage: MetricUsage) -> bool:
        # if no engine, the metric is not attached
if engine is None:
return False
# check recursively if metrics are attached
is_detached = False
for metric in itertools.chain(self.args, self.kwargs.values()):
if isinstance(metric, MetricsLambda):
if not metric._internal_is_attached(engine, usage):
is_detached = True
elif isinstance(metric, Metric):
if not engine.has_event_handler(metric.started, usage.STARTED):
is_detached = True
if not engine.has_event_handler(metric.iteration_completed, usage.ITERATION_COMPLETED):
is_detached = True
return not is_detached
def _get_value_on_cpu(v: Any) -> Any:
if isinstance(v, Metric):
v = v.compute()
if isinstance(v, torch.Tensor):
v = v.cpu()
return v
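# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): arithmetic and method
# calls on metrics produce MetricsLambda objects lazily, and the plain
# reset/update/compute API works as long as the metric is not attached to an
# engine. The tensors below are made-up toy data.
if __name__ == "__main__":
    import torch

    from ignite.metrics import Precision

    precision = Precision(average=False)   # per-class precision vector
    macro_precision = precision.mean()     # MetricsLambda: mean over classes

    y_pred = torch.tensor([[0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
    y_true = torch.tensor([0, 1, 1])

    macro_precision.update((y_pred, y_true))  # recursively updates `precision`
    print(precision.compute())                # roughly tensor([0.5, 1.0]) here
    print(macro_precision.compute())          # roughly 0.75 for this toy batch
    macro_precision.reset()                   # recursively resets dependencies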
|
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["Accuracy"]
class _BaseClassification(Metric):
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
self._is_multilabel = is_multilabel
self._type: Optional[str] = None
self._num_classes: Optional[int] = None
super(_BaseClassification, self).__init__(output_transform=output_transform, device=device)
def reset(self) -> None:
self._type = None
self._num_classes = None
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not (y.ndimension() == y_pred.ndimension() or y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
"y must have shape of (batch_size, ...) and y_pred must have "
"shape of (batch_size, num_categories, ...) or (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape: Tuple[int, ...] = y_pred.shape
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if not (y_shape == y_pred_shape):
raise ValueError("y and y_pred must have compatible shapes.")
if self._is_multilabel and not (y.shape == y_pred.shape and y.ndimension() > 1 and y.shape[1] > 1):
raise ValueError(
"y and y_pred must have same shape of (batch_size, num_categories, ...) and num_categories > 1."
)
def _check_binary_multilabel_cases(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not torch.equal(y, y**2):
raise ValueError("For binary cases, y must be comprised of 0's and 1's.")
if not torch.equal(y_pred, y_pred**2):
raise ValueError("For binary cases, y_pred must be comprised of 0's and 1's.")
def _check_type(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if y.ndimension() + 1 == y_pred.ndimension():
num_classes = y_pred.shape[1]
if num_classes == 1:
update_type = "binary"
self._check_binary_multilabel_cases((y_pred, y))
else:
update_type = "multiclass"
elif y.ndimension() == y_pred.ndimension():
self._check_binary_multilabel_cases((y_pred, y))
if self._is_multilabel:
update_type = "multilabel"
num_classes = y_pred.shape[1]
else:
update_type = "binary"
num_classes = 1
else:
raise RuntimeError(
f"Invalid shapes of y (shape={y.shape}) and y_pred (shape={y_pred.shape}), check documentation."
" for expected shapes of y and y_pred."
)
if self._type is None:
self._type = update_type
self._num_classes = num_classes
else:
if self._type != update_type:
raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.")
if self._num_classes != num_classes:
raise ValueError(f"Input data number of classes has changed from {self._num_classes} to {num_classes}")
class Accuracy(_BaseClassification):
r"""Calculates the accuracy for binary, multiclass and multilabel data.
.. math:: \text{Accuracy} = \frac{ TP + TN }{ TP + TN + FP + FN }
where :math:`\text{TP}` is true positives, :math:`\text{TN}` is true negatives,
:math:`\text{FP}` is false positives and :math:`\text{FN}` is false negatives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
    - For multilabel cases, `y` and `y_pred` must have shape (batch_size, num_categories, ...) and
      num_categories must be greater than 1.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
is_multilabel: flag to use in multilabel case. By default, False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case
.. testcode:: 1
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 1
0.6666...
Multiclass case
.. testcode:: 2
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 2
0.5
Multilabel case
.. testcode:: 3
metric = Accuracy(is_multilabel=True)
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 0, 1],
])
y_pred = torch.tensor([
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 3
0.2
In binary and multilabel cases, the elements of `y` and `y_pred` should have 0 or 1 values. Thresholding of
predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Accuracy(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 4
0.6666...
"""
_state_dict_all_req_keys = ("_num_correct", "_num_examples")
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(Accuracy, self).__init__(output_transform=output_transform, is_multilabel=is_multilabel, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._num_correct = torch.tensor(0, device=self._device)
self._num_examples = 0
super(Accuracy, self).reset()
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
self._check_type(output)
y_pred, y = output[0].detach(), output[1].detach()
if self._type == "binary":
correct = torch.eq(y_pred.view(-1).to(y), y.view(-1))
elif self._type == "multiclass":
indices = torch.argmax(y_pred, dim=1)
correct = torch.eq(indices, y).view(-1)
elif self._type == "multilabel":
# if y, y_pred shape is (N, C, ...) -> (N x ..., C)
num_classes = y_pred.size(1)
last_dim = y_pred.ndimension()
y_pred = torch.transpose(y_pred, 1, last_dim - 1).reshape(-1, num_classes)
y = torch.transpose(y, 1, last_dim - 1).reshape(-1, num_classes)
correct = torch.all(y == y_pred.type_as(y), dim=-1)
self._num_correct += torch.sum(correct).to(self._device)
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_examples", "_num_correct")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("Accuracy must have at least one example before it can be computed.")
return self._num_correct.item() / self._num_examples
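# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): using Accuracy through
# the plain reset/update/compute API, without an Engine. Toy tensors only.
if __name__ == "__main__":
    acc = Accuracy()

    # Multiclass: y_pred is (batch_size, num_classes) scores, y is class indices.
    y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    y_true = torch.tensor([1, 0, 0])

    acc.update((y_pred, y_true))  # may be called once per batch
    print(acc.compute())          # 2 correct out of 3 -> ~0.6667

    acc.reset()                   # clears _num_correct and _num_examples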
|
from typing import Callable, Sequence, Union
import torch
from torch.nn.functional import pairwise_distance
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanPairwiseDistance"]
class MeanPairwiseDistance(Metric):
"""Calculates the mean :class:`~torch.nn.PairwiseDistance`.
Average of pairwise distances computed on provided batches.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
p: the norm degree. Default: 2
eps: Small value to avoid division by zero. Default: 1e-6
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanPairwiseDistance(p=4)
metric.attach(default_evaluator, 'mpd')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mpd'])
.. testoutput::
1.5955...
"""
_state_dict_all_req_keys = ("_sum_of_distances", "_num_examples")
def __init__(
self,
p: int = 2,
eps: float = 1e-6,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(MeanPairwiseDistance, self).__init__(output_transform, device=device)
self._p = p
self._eps = eps
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_distances = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
self._sum_of_distances += torch.sum(distances).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_distances", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.")
return self._sum_of_distances.item() / self._num_examples
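# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): MeanPairwiseDistance via
# the plain update/compute API. Toy tensors; `p` and `eps` keep their defaults.
if __name__ == "__main__":
    mpd = MeanPairwiseDistance()  # L2 distance by default (p=2)

    y_pred = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    y_true = torch.tensor([[0.0, 0.0], [0.0, 0.0]])

    mpd.update((y_pred, y_true))
    print(mpd.compute())          # average L2 distance, ~1.0 for this toy batch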
|
import numbers
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.metrics_lambda import MetricsLambda
__all__ = ["ConfusionMatrix", "mIoU", "IoU", "DiceCoefficient", "cmAccuracy", "cmPrecision", "cmRecall", "JaccardIndex"]
class ConfusionMatrix(Metric):
"""Calculates confusion matrix for multi-class data.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must contain logits and has the following shape (batch_size, num_classes, ...).
If you are doing binary classification, see Note for an example on how to get this.
- `y` should have the following shape (batch_size, ...) and contains ground-truth class indices
with or without the background class. During the computation, argmax of `y_pred` is taken to determine
predicted classes.
Args:
num_classes: Number of classes, should be > 1. See notes for more details.
average: confusion matrix values averaging schema: None, "samples", "recall", "precision".
Default is None. If `average="samples"` then confusion matrix values are normalized by the number of seen
samples. If `average="recall"` then confusion matrix values are normalized such that diagonal values
represent class recalls. If `average="precision"` then confusion matrix values are normalized such that
diagonal values represent class precisions.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Note:
The confusion matrix is formatted such that columns are predictions and rows are targets.
For example, if you were to plot the matrix, you could correctly assign to the horizontal axis
the label "predicted values" and to the vertical axis the label "actual values".
Note:
        In case the targets `y` are in `(batch_size, ...)` format, only target indices in the range
        `[0, num_classes)` contribute to the confusion matrix; other indices are ignored. For example, if
        `num_classes=20` and a target index equal to 255 is encountered, it is filtered out.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
metric = ConfusionMatrix(num_classes=3)
metric.attach(default_evaluator, 'cm')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm'])
.. testoutput:: 1
tensor([[1, 1, 0],
[0, 2, 0],
[0, 1, 0]])
If you are doing binary classification with a single output unit, you may have to transform your network output,
so that you have one value for each class. E.g. you can transform your network output into a one-hot vector
with:
.. testcode:: 2
def binary_one_hot_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred).round().long()
y_pred = ignite.utils.to_onehot(y_pred, 2)
y = y.long()
return y_pred, y
metric = ConfusionMatrix(num_classes=2, output_transform=binary_one_hot_output_transform)
metric.attach(default_evaluator, 'cm')
y_true = torch.tensor([0, 1, 0, 1, 0])
y_pred = torch.tensor([0, 0, 1, 1, 0])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm'])
.. testoutput:: 2
tensor([[2, 1],
[1, 1]])
"""
_state_dict_all_req_keys = ("confusion_matrix", "_num_examples")
def __init__(
self,
num_classes: int,
average: Optional[str] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if average is not None and average not in ("samples", "recall", "precision"):
raise ValueError("Argument average can None or one of 'samples', 'recall', 'precision'")
if num_classes <= 1:
raise ValueError("Argument num_classes needs to be > 1")
self.num_classes = num_classes
self._num_examples = 0
self.average = average
super(ConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.confusion_matrix = torch.zeros(self.num_classes, self.num_classes, dtype=torch.int64, device=self._device)
self._num_examples = 0
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() < 2:
raise ValueError(
f"y_pred must have shape (batch_size, num_classes (currently set to {self.num_classes}), ...), "
f"but given {y_pred.shape}"
)
if y_pred.shape[1] != self.num_classes:
raise ValueError(f"y_pred does not have correct number of classes: {y_pred.shape[1]} vs {self.num_classes}")
if not (y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
f"y_pred must have shape (batch_size, num_classes (currently set to {self.num_classes}), ...) "
"and y must have shape of (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape: Tuple[int, ...] = y_pred.shape
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if y_shape != y_pred_shape:
raise ValueError("y and y_pred must have compatible shapes.")
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
y_pred, y = output[0].detach(), output[1].detach()
self._num_examples += y_pred.shape[0]
# target is (batch_size, ...)
y_pred = torch.argmax(y_pred, dim=1).flatten()
y = y.flatten()
target_mask = (y >= 0) & (y < self.num_classes)
y = y[target_mask]
y_pred = y_pred[target_mask]
indices = self.num_classes * y + y_pred
m = torch.bincount(indices, minlength=self.num_classes**2).reshape(self.num_classes, self.num_classes)
self.confusion_matrix += m.to(self.confusion_matrix)
@sync_all_reduce("confusion_matrix", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("Confusion matrix must have at least one example before it can be computed.")
if self.average:
self.confusion_matrix = self.confusion_matrix.float()
if self.average == "samples":
return self.confusion_matrix / self._num_examples
else:
return self.normalize(self.confusion_matrix, self.average)
return self.confusion_matrix
@staticmethod
def normalize(matrix: torch.Tensor, average: str) -> torch.Tensor:
"""Normalize given `matrix` with given `average`."""
if average == "recall":
return matrix / (matrix.sum(dim=1).unsqueeze(1) + 1e-15)
elif average == "precision":
return matrix / (matrix.sum(dim=0) + 1e-15)
else:
raise ValueError("Argument average should be one of 'samples', 'recall', 'precision'")
def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
r"""Calculates Intersection over Union using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
.. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert }
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = IoU(cm)
metric.attach(default_evaluator, 'iou')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['iou'])
.. testoutput::
tensor([0.5000, 0.5000, 0.0000], dtype=torch.float64)
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if not (cm.average in (None, "samples")):
raise ValueError("ConfusionMatrix should have average attribute either None or 'samples'")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(
f"ignore_index should be integer and in the range of [0, {cm.num_classes}), but given {ignore_index}"
)
# Increase floating point precision and pass to CPU
cm = cm.to(torch.double)
iou: MetricsLambda = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15)
if ignore_index is not None:
        ignore_idx: int = ignore_index  # used due to typing issues with mypy
def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(iou_vector):
raise ValueError(f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}")
indices = list(range(len(iou_vector)))
indices.remove(ignore_idx)
return iou_vector[indices]
return MetricsLambda(ignore_index_fn, iou)
else:
return iou
def mIoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates mean Intersection over Union using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = mIoU(cm, ignore_index=0)
metric.attach(default_evaluator, 'miou')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['miou'])
.. testoutput::
0.24999...
"""
iou: MetricsLambda = IoU(cm=cm, ignore_index=ignore_index).mean()
return iou
def cmAccuracy(cm: ConfusionMatrix) -> MetricsLambda:
"""Calculates accuracy using :class:`~ignite.metrics.metric.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.to(torch.double)
accuracy: MetricsLambda = cm.diag().sum() / (cm.sum() + 1e-15)
return accuracy
def cmPrecision(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""Calculates precision using :class:`~ignite.metrics.metric.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
average: if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.to(torch.double)
precision: MetricsLambda = cm.diag() / (cm.sum(dim=0) + 1e-15)
if average:
mean: MetricsLambda = precision.mean()
return mean
return precision
def cmRecall(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""
Calculates recall using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
average: if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.to(torch.double)
recall: MetricsLambda = cm.diag() / (cm.sum(dim=1) + 1e-15)
if average:
mean: MetricsLambda = recall.mean()
return mean
return recall
def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates Dice Coefficient for a given :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = DiceCoefficient(cm, ignore_index=0)
metric.attach(default_evaluator, 'dice')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['dice'])
.. testoutput::
tensor([0.6667, 0.0000], dtype=torch.float64)
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(
f"ignore_index should be integer and in the range of [0, {cm.num_classes}), but given {ignore_index}"
)
# Increase floating point precision and pass to CPU
cm = cm.to(torch.double)
dice: MetricsLambda = 2.0 * cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) + 1e-15)
if ignore_index is not None:
        ignore_idx: int = ignore_index  # used due to typing issues with mypy
def ignore_index_fn(dice_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(dice_vector):
raise ValueError(
f"ignore_index {ignore_idx} is larger than the length of Dice vector {len(dice_vector)}"
)
indices = list(range(len(dice_vector)))
indices.remove(ignore_idx)
return dice_vector[indices]
return MetricsLambda(ignore_index_fn, dice)
else:
return dice
def JaccardIndex(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
r"""Calculates the Jaccard Index using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Implementation is based on :meth:`~ignite.metrics.IoU`.
.. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert }
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = JaccardIndex(cm, ignore_index=0)
metric.attach(default_evaluator, 'jac')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['jac'])
.. testoutput::
tensor([0.5000, 0.0000], dtype=torch.float64)
"""
return IoU(cm, ignore_index)
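# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): ConfusionMatrix plus the
# IoU and DiceCoefficient helpers via the plain update/compute API. Toy data.
if __name__ == "__main__":
    cm = ConfusionMatrix(num_classes=2)
    iou = IoU(cm)                 # MetricsLambda built on top of `cm`
    dice = DiceCoefficient(cm)    # reuses the very same `cm` state

    # y_pred holds per-class scores (argmax is taken internally), y holds indices.
    y_pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
    y_true = torch.tensor([0, 1, 1])

    cm.update((y_pred, y_true))   # update the confusion matrix directly
    print(cm.compute())           # 2x2 matrix (rows: targets, columns: predictions)
    print(iou.compute())          # per-class IoU derived from `cm`
    print(dice.compute())         # per-class Dice derived from `cm`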
|
import json
from typing import Callable, Collection, Dict, List, Optional, Union
import torch
from ignite.metrics.fbeta import Fbeta
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.precision import Precision
from ignite.metrics.recall import Recall
__all__ = ["ClassificationReport"]
def ClassificationReport(
beta: int = 1,
output_dict: bool = False,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
is_multilabel: bool = False,
labels: Optional[List[str]] = None,
) -> MetricsLambda:
r"""Build a text report showing the main classification metrics. The report resembles in functionality to
`scikit-learn classification_report
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html#sklearn.metrics.classification_report>`_
The underlying implementation doesn't use the sklearn function.
Args:
beta: weight of precision in harmonic mean
output_dict: If True, return output as dict, otherwise return a str
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
is_multilabel: If True, the tensors are assumed to be multilabel.
device: optional device specification for internal storage.
labels: Optional list of label indices to include in the report
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Multiclass case
.. testcode:: 1
metric = ClassificationReport(output_dict=True)
metric.attach(default_evaluator, "cr")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["cr"].keys())
print(state.metrics["cr"]["0"])
print(state.metrics["cr"]["1"])
print(state.metrics["cr"]["2"])
print(state.metrics["cr"]["macro avg"])
.. testoutput:: 1
dict_keys(['0', '1', '2', 'macro avg'])
{'precision': 0.5, 'recall': 0.5, 'f1-score': 0.4999...}
{'precision': 1.0, 'recall': 0.5, 'f1-score': 0.6666...}
{'precision': 0.3333..., 'recall': 0.5, 'f1-score': 0.3999...}
{'precision': 0.6111..., 'recall': 0.5, 'f1-score': 0.5222...}
Multilabel case, the shapes must be (batch_size, num_categories, ...)
.. testcode:: 2
metric = ClassificationReport(output_dict=True, is_multilabel=True)
metric.attach(default_evaluator, "cr")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["cr"].keys())
print(state.metrics["cr"]["0"])
print(state.metrics["cr"]["1"])
print(state.metrics["cr"]["2"])
print(state.metrics["cr"]["macro avg"])
.. testoutput:: 2
dict_keys(['0', '1', '2', 'macro avg'])
{'precision': 0.2, 'recall': 1.0, 'f1-score': 0.3333...}
{'precision': 0.5, 'recall': 1.0, 'f1-score': 0.6666...}
{'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0}
{'precision': 0.2333..., 'recall': 0.6666..., 'f1-score': 0.3333...}
"""
# setup all the underlying metrics
precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
fbeta = Fbeta(beta, average=False, precision=precision, recall=recall)
averaged_precision = precision.mean()
averaged_recall = recall.mean()
averaged_fbeta = fbeta.mean()
def _wrapper(
re: torch.Tensor, pr: torch.Tensor, f: torch.Tensor, a_re: torch.Tensor, a_pr: torch.Tensor, a_f: torch.Tensor
) -> Union[Collection[str], Dict]:
if pr.shape != re.shape:
raise ValueError(
"Internal error: Precision and Recall have mismatched shapes: "
f"{pr.shape} vs {re.shape}. Please, open an issue "
"with a reference on this error. Thank you!"
)
dict_obj = {}
for idx, p_label in enumerate(pr):
dict_obj[_get_label_for_class(idx)] = {
"precision": p_label.item(),
"recall": re[idx].item(),
"f{0}-score".format(beta): f[idx].item(),
}
dict_obj["macro avg"] = {
"precision": a_pr.item(),
"recall": a_re.item(),
"f{0}-score".format(beta): a_f.item(),
}
return dict_obj if output_dict else json.dumps(dict_obj)
# helper method to get a label for a given class
def _get_label_for_class(idx: int) -> str:
return labels[idx] if labels else str(idx)
return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta)
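# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): attaching
# ClassificationReport to a minimal evaluator Engine whose process_function
# simply forwards each (y_pred, y) batch as its output. Toy tensors only.
if __name__ == "__main__":
    from ignite.engine import Engine

    evaluator = Engine(lambda engine, batch: batch)

    report = ClassificationReport(output_dict=True)
    report.attach(evaluator, "cr")

    y_pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
    y_true = torch.tensor([0, 1, 1])

    state = evaluator.run([(y_pred, y_true)])
    print(state.metrics["cr"])    # per-class precision/recall/f1 plus "macro avg"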
|
from ignite.metrics.gan.fid import FID
from ignite.metrics.gan.inception_score import InceptionScore
__all__ = [
"InceptionScore",
"FID",
]
|
from typing import Callable, Optional, Union
import torch
from packaging.version import Version
from ignite.metrics.metric import Metric
class InceptionModel(torch.nn.Module):
r"""Inception Model pre-trained on the ImageNet Dataset.
Args:
return_features: set it to `True` if you want the model to return features from the last pooling
layer instead of prediction probabilities.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
"""
def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
try:
import torchvision
from torchvision import models
except ImportError:
raise ModuleNotFoundError("This module requires torchvision to be installed.")
super(InceptionModel, self).__init__()
self._device = device
if Version(torchvision.__version__) < Version("0.13.0"):
model_kwargs = {"pretrained": True}
else:
model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
self.model = models.inception_v3(**model_kwargs).to(self._device)
if return_features:
self.model.fc = torch.nn.Identity()
else:
self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))
self.model.eval()
@torch.no_grad()
def forward(self, data: torch.Tensor) -> torch.Tensor:
if data.dim() != 4:
raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}")
if data.shape[1] != 3:
raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}")
if data.device != torch.device(self._device):
data = data.to(self._device)
return self.model(data)
class _BaseInceptionMetric(Metric):
def __init__(
self,
num_features: Optional[int],
feature_extractor: Optional[torch.nn.Module],
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if num_features is None:
raise ValueError("Argument num_features must be provided, if feature_extractor is specified.")
if feature_extractor is None:
feature_extractor = torch.nn.Identity()
if num_features <= 0:
raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}")
if not isinstance(feature_extractor, torch.nn.Module):
raise TypeError(
f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}"
)
self._num_features = num_features
self._feature_extractor = feature_extractor.to(device)
super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)
def _check_feature_shapes(self, samples: torch.Tensor) -> None:
if samples.dim() != 2:
raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}")
if samples.shape[0] == 0:
raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}")
if samples.shape[1] != self._num_features:
raise ValueError(
f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}"
)
def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:
inputs = inputs.detach()
if inputs.device != torch.device(self._device):
inputs = inputs.to(self._device)
with torch.no_grad():
outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)
self._check_feature_shapes(outputs)
return outputs
|
import warnings
from typing import Callable, Optional, Sequence, Union
import torch
from packaging.version import Version
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
__all__ = [
"FID",
]
if Version(torch.__version__) <= Version("1.7.0"):
torch_outer = torch.ger
else:
torch_outer = torch.outer
def fid_score(
mu1: torch.Tensor, mu2: torch.Tensor, sigma1: torch.Tensor, sigma2: torch.Tensor, eps: float = 1e-6
) -> float:
try:
import numpy as np
except ImportError:
raise ModuleNotFoundError("fid_score requires numpy to be installed.")
try:
import scipy.linalg
except ImportError:
raise ModuleNotFoundError("fid_score requires scipy to be installed.")
mu1, mu2 = mu1.cpu(), mu2.cpu()
sigma1, sigma2 = sigma1.cpu(), sigma2.cpu()
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2), disp=False)
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
if not np.isfinite(covmean).all():
tr_covmean = np.sum(np.sqrt(((np.diag(sigma1) * eps) * (np.diag(sigma2) * eps)) / (eps * eps)))
return float(diff.dot(diff).item() + torch.trace(sigma1) + torch.trace(sigma2) - 2 * tr_covmean)
class FID(_BaseInceptionMetric):
r"""Calculates Frechet Inception Distance.
.. math::
\text{FID} = |\mu_{1} - \mu_{2}| + \text{Tr}(\sigma_{1} + \sigma_{2} - {2}\sqrt{\sigma_1*\sigma_2})
where :math:`\mu_1` and :math:`\sigma_1` refer to the mean and covariance of the train data and
:math:`\mu_2` and :math:`\sigma_2` refer to the mean and covariance of the test data.
    More details can be found in `Heusel et al. 2017`__
__ https://arxiv.org/pdf/1706.08500.pdf
In addition, a faster and online computation approach can be found in `Chen et al. 2014`__
__ https://arxiv.org/pdf/2009.14075.pdf
Remark:
This implementation is inspired by `pytorch_fid` package which can be found `here`__
__ https://github.com/mseitzer/pytorch-fid
.. note::
The default Inception model requires the `torchvision` module to be installed.
FID also requires `scipy` library for matrix square root calculations.
Args:
num_features: number of features predicted by the model or the reduced feature vector of the image.
Default value is 2048.
feature_extractor: a torch Module for extracting the features from the input data.
It returns a tensor of shape (batch_size, num_features).
If neither ``num_features`` nor ``feature_extractor`` are defined, by default we use an ImageNet
pretrained Inception Model. If only ``num_features`` is defined but ``feature_extractor`` is not
defined, ``feature_extractor`` is assigned Identity Function.
Please note that the model will be implicitly converted to device mentioned in the ``device``
argument.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FID(num_features=1, feature_extractor=default_model)
metric.attach(default_evaluator, "fid")
y_true = torch.ones(10, 4)
y_pred = torch.ones(10, 4)
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["fid"])
.. testoutput::
0.0
.. note::
The default `torchvision` model used is InceptionV3 pretrained on ImageNet.
This can lead to differences in results with `pytorch_fid`. To find comparable results,
the following model wrapper should be used:
.. code::
import torch.nn as nn
# wrapper class as feature_extractor
class WrapperInceptionV3(nn.Module):
def __init__(self, fid_incv3):
super().__init__()
self.fid_incv3 = fid_incv3
@torch.no_grad()
def forward(self, x):
y = self.fid_incv3(x)
y = y[0]
y = y[:, :, 0, 0]
return y
# use cpu rather than cuda to get comparable results
device = "cpu"
# pytorch_fid model
dims = 2048
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
# wrapper model to pytorch_fid model
wrapper_model = WrapperInceptionV3(model)
wrapper_model.eval();
# comparable metric
pytorch_fid_metric = FID(num_features=dims, feature_extractor=wrapper_model)
        Important: `pytorch_fid` results depend on the batch size if the device is `cuda`.
.. versionadded:: 0.4.6
"""
_state_dict_all_req_keys = ("_num_examples", "_train_total", "_test_total", "_train_sigma", "_test_sigma")
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
try:
import numpy as np # noqa: F401
except ImportError:
raise ModuleNotFoundError("This module requires numpy to be installed.")
try:
import scipy # noqa: F401
except ImportError:
raise ModuleNotFoundError("This module requires scipy to be installed.")
if num_features is None and feature_extractor is None:
num_features = 1000
feature_extractor = InceptionModel(return_features=False, device=device)
self._eps = 1e-6
super(FID, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
@staticmethod
def _online_update(features: torch.Tensor, total: torch.Tensor, sigma: torch.Tensor) -> None:
total += features
sigma += torch_outer(features, features)
def _get_covariance(self, sigma: torch.Tensor, total: torch.Tensor) -> torch.Tensor:
r"""
Calculates covariance from mean and sum of products of variables
"""
sub_matrix = torch_outer(total, total)
sub_matrix = sub_matrix / self._num_examples
return (sigma - sub_matrix) / (self._num_examples - 1)
@reinit__is_reduced
def reset(self) -> None:
self._train_sigma = torch.zeros(
(self._num_features, self._num_features), dtype=torch.float64, device=self._device
)
self._train_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._test_sigma = torch.zeros(
(self._num_features, self._num_features), dtype=torch.float64, device=self._device
)
self._test_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._num_examples: int = 0
super(FID, self).reset()
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
train, test = output
train_features = self._extract_features(train)
test_features = self._extract_features(test)
if train_features.shape[0] != test_features.shape[0] or train_features.shape[1] != test_features.shape[1]:
raise ValueError(
f"""
Number of Training Features and Testing Features should be equal ({train_features.shape} != {test_features.shape})
"""
)
# Updates the mean and covariance for the train features
for features in train_features:
self._online_update(features, self._train_total, self._train_sigma)
# Updates the mean and covariance for the test features
for features in test_features:
self._online_update(features, self._test_total, self._test_sigma)
self._num_examples += train_features.shape[0]
@sync_all_reduce("_num_examples", "_train_total", "_test_total", "_train_sigma", "_test_sigma")
def compute(self) -> float:
fid = fid_score(
mu1=self._train_total / self._num_examples,
mu2=self._test_total / self._num_examples,
sigma1=self._get_covariance(self._train_sigma, self._train_total),
sigma2=self._get_covariance(self._test_sigma, self._test_total),
eps=self._eps,
)
if torch.isnan(torch.tensor(fid)) or torch.isinf(torch.tensor(fid)):
warnings.warn("The product of covariance of train and test features is out of bounds.")
return fid
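# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): FID with a trivial
# feature extractor (torch.nn.Identity), so no torchvision download is needed;
# numpy and scipy are still required. The feature dimension (4) and the random
# tensors below are made-up placeholders.
if __name__ == "__main__":
    metric = FID(num_features=4, feature_extractor=torch.nn.Identity())

    train_features = torch.rand(64, 4)  # stands in for features of real images
    test_features = torch.rand(64, 4)   # stands in for features of generated images

    metric.update((train_features, test_features))
    print(metric.compute())  # small positive value; 0.0 only for identical statistics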
|
from typing import Callable, Optional, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
# These decorators help with distributed settings
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
__all__ = ["InceptionScore"]
class InceptionScore(_BaseInceptionMetric):
r"""Calculates Inception Score.
.. math::
\text{IS(G)} = \exp(\frac{1}{N}\sum_{i=1}^{N} D_{KL} (p(y|x^{(i)} \parallel \hat{p}(y))))
where :math:`p(y|x)` is the conditional probability of image being the given object and
:math:`p(y)` is the marginal probability that the given image is real, `G` refers to the
generated image and :math:`D_{KL}` refers to KL Divergence of the above mentioned probabilities.
More details can be found in `Barratt et al. 2018`__.
__ https://arxiv.org/pdf/1801.01973.pdf
Args:
num_features: number of features predicted by the model or number of classes of the model. Default
value is 1000.
feature_extractor: a torch Module for predicting the probabilities from the input data.
It returns a tensor of shape (batch_size, num_features).
If neither ``num_features`` nor ``feature_extractor`` are defined, by default we use an ImageNet
pretrained Inception Model. If only ``num_features`` is defined but ``feature_extractor`` is not
defined, ``feature_extractor`` is assigned Identity Function.
Please note that the class object will be implicitly converted to device mentioned in the
``device`` argument.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``y_pred``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
.. note::
The default Inception model requires the `torchvision` module to be installed.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. code-block:: python
metric = InceptionScore()
metric.attach(default_evaluator, "is")
y = torch.rand(10, 3, 299, 299)
state = default_evaluator.run([y])
print(state.metrics["is"])
.. testcode::
metric = InceptionScore(num_features=1, feature_extractor=default_model)
metric.attach(default_evaluator, "is")
y = torch.zeros(10, 4)
state = default_evaluator.run([y])
print(state.metrics["is"])
.. testoutput::
1.0
.. versionadded:: 0.4.6
"""
_state_dict_all_req_keys = ("_num_examples", "_prob_total", "_total_kl_d")
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if num_features is None and feature_extractor is None:
num_features = 1000
feature_extractor = InceptionModel(return_features=False, device=device)
self._eps = 1e-16
super(InceptionScore, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
@reinit__is_reduced
def reset(self) -> None:
self._num_examples = 0
self._prob_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._total_kl_d = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
super(InceptionScore, self).reset()
@reinit__is_reduced
def update(self, output: torch.Tensor) -> None:
probabilities = self._extract_features(output)
prob_sum = torch.sum(probabilities, 0, dtype=torch.float64)
log_prob = torch.log(probabilities + self._eps)
if log_prob.dtype != probabilities.dtype:
log_prob = log_prob.to(probabilities)
kl_sum = torch.sum(probabilities * log_prob, 0, dtype=torch.float64)
self._num_examples += probabilities.shape[0]
self._prob_total += prob_sum
self._total_kl_d += kl_sum
@sync_all_reduce("_num_examples", "_prob_total", "_total_kl_d")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("InceptionScore must have at least one example before it can be computed.")
mean_probs = self._prob_total / self._num_examples
log_mean_probs = torch.log(mean_probs + self._eps)
if log_mean_probs.dtype != self._prob_total.dtype:
log_mean_probs = log_mean_probs.to(self._prob_total)
excess_entropy = self._prob_total * log_mean_probs
avg_kl_d = torch.sum(self._total_kl_d - excess_entropy) / self._num_examples
return torch.exp(avg_kl_d).item()
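# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the ignite source): InceptionScore with a
# softmax head standing in for the Inception classifier, so the sketch runs
# without torchvision. The class count (10) and random inputs are placeholders.
if __name__ == "__main__":
    metric = InceptionScore(num_features=10, feature_extractor=torch.nn.Softmax(dim=1))

    fake_scores = torch.rand(32, 10)  # stands in for classifier scores on generated images
    metric.update(fake_scores)        # update receives y_pred only
    print(metric.compute())           # slightly above 1.0 for near-uniform predictions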
|
import math
from typing import Any, Callable, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.nlp.utils import modified_precision
__all__ = ["Bleu"]
def _closest_ref_length(references: Sequence[Sequence[Any]], hyp_len: int) -> int:
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len))
return closest_ref_len
class _Smoother:
"""
Smoothing helper
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
"""
def __init__(self, method: str):
valid = ["no_smooth", "smooth1", "nltk_smooth2", "smooth2"]
if method not in valid:
raise ValueError(f"Smooth is not valid (expected: {valid}, got: {method})")
self.smooth = method
def __call__(self, numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
method = getattr(self, self.smooth)
return method(numerators, denominators)
@staticmethod
def smooth1(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
epsilon = 0.1
denominators_ = [max(1, d.item()) for d in denominators]
return [n.item() / d if n != 0 else epsilon / d for n, d in zip(numerators, denominators_)]
@staticmethod
def nltk_smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
denominators_ = torch.tensor([max(1, d.item()) for d in denominators])
return _Smoother._smooth2(numerators, denominators_)
@staticmethod
def smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
return _Smoother._smooth2(numerators, denominators)
@staticmethod
def _smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
return [
(n.item() + 1) / (d.item() + 1) if i != 0 else n.item() / d.item()
for i, (n, d) in enumerate(zip(numerators, denominators))
]
@staticmethod
def no_smooth(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
denominators_ = [max(1, d) for d in denominators]
return [n.item() / d for n, d in zip(numerators, denominators_)]
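# Worked toy example of the smoothing methods above (illustrative, not from the
# ignite source). With numerators [2, 0] and denominators [4, 3], where the
# first entry is the unigram order:
#   - no_smooth:              [2/4, 0/3]; a zero precision at any order makes
#                             the geometric mean (and thus BLEU) collapse to 0.
#   - smooth1:                [2/4, 0.1/3]; zero numerators are replaced by a
#                             small epsilon (0.1) before dividing.
#   - smooth2 / nltk_smooth2: [2/4, (0 + 1) / (3 + 1)]; add 1 to numerator and
#                             denominator for every order above unigrams
#                             (nltk_smooth2 additionally clamps denominators at 1).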
class Bleu(Metric):
r"""Calculates the `BLEU score <https://en.wikipedia.org/wiki/BLEU>`_.
.. math::
\text{BLEU} = b_{p} \cdot \exp \left( \sum_{n=1}^{N} w_{n} \: \log p_{n} \right)
    where :math:`N` is the order of n-grams, :math:`b_{p}` is a sentence brevity penalty, :math:`w_{n}` are
positive weights summing to one and :math:`p_{n}` are modified n-gram precisions.
More details can be found in `Papineni et al. 2002`__.
__ https://www.aclweb.org/anthology/P02-1040
In addition, a review of smoothing techniques can be found in `Chen et al. 2014`__
__ https://aclanthology.org/W14-3346.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
      - `y_pred` (list(list(str))) - a list of hypothesis sentences.
      - `y` (list(list(list(str)))) - a corpus of lists of reference sentences w.r.t hypotheses.
    Remark:
        This implementation is inspired by nltk.
Args:
ngram: order of n-grams.
smooth: enable smoothing. Valid are ``no_smooth``, ``smooth1``, ``nltk_smooth2`` or ``smooth2``.
Default: ``no_smooth``.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
        average: specifies which type of averaging to use ("macro" or "micro");
            for more details refer to https://www.nltk.org/_modules/nltk/translate/bleu_score.html.
            Default: "macro"
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics.nlp import Bleu
m = Bleu(ngram=4, smooth="smooth1")
y_pred = "the the the the the the the"
y = ["the cat is on the mat", "there is a cat on the mat"]
m.update(([y_pred.split()], [[_y.split() for _y in y]]))
print(m.compute())
.. testoutput::
tensor(0.0393, dtype=torch.float64)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
        - ``update`` method has changed and now works on a batch of inputs.
- added ``average`` option to handle micro and macro averaging modes.
"""
def __init__(
self,
ngram: int = 4,
smooth: str = "no_smooth",
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
average: str = "macro",
):
if ngram <= 0:
raise ValueError(f"ngram order must be greater than zero (got: {ngram})")
self.ngrams_order = ngram
self.weights = [1 / self.ngrams_order] * self.ngrams_order
self.smoother = _Smoother(method=smooth)
if average not in ["macro", "micro"]:
raise ValueError(f'Average must be either "macro" or "micro" (got: {average})')
self.average = average
if average == "micro":
self._state_dict_all_req_keys = ("p_numerators", "p_denominators", "hyp_length_sum", "ref_length_sum")
else:
self._state_dict_all_req_keys = ("_sum_of_bleu", "_num_sentences")
super(Bleu, self).__init__(output_transform=output_transform, device=device)
def _n_gram_counter(
self,
references: Sequence[Sequence[Sequence[Any]]],
candidates: Sequence[Sequence[Any]],
p_numerators: torch.Tensor,
p_denominators: torch.Tensor,
) -> Tuple[int, int]:
if len(references) != len(candidates):
raise ValueError(
f"nb of candidates should be equal to nb of reference lists ({len(candidates)} != "
f"{len(references)})"
)
hyp_lengths = 0
ref_lengths = 0
# Iterate through each hypothesis and their corresponding references.
for refs, hyp in zip(references, candidates):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i in range(1, self.ngrams_order + 1):
numerator, denominator = modified_precision(refs, hyp, i)
p_numerators[i] += numerator
p_denominators[i] += denominator
# Calculate the hypothesis lengths
hyp_lengths += len(hyp)
# Calculate the closest reference lengths.
ref_lengths += _closest_ref_length(refs, len(hyp))
return hyp_lengths, ref_lengths
def _brevity_penalty_smoothing(
self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int
) -> float:
        # Return 0 if there are no matching n-grams.
        # We only need to check p_numerators[1] == 0: if there are no matching
        # unigrams, there cannot be any matching higher-order n-grams.
        if p_numerators[1] == 0:
            return 0
        # Without smoothing, return 0 if at least one n-gram order has no matches.
        if self.smoother.smooth == "no_smooth" and min(p_numerators[1:]).item() == 0:
            return 0
# Calculate corpus-level brevity penalty.
if hyp_length_sum < ref_length_sum:
bp = math.exp(1 - ref_length_sum / hyp_length_sum) if hyp_length_sum > 0 else 0.0
else:
bp = 1.0
# Smoothing
p_n = self.smoother(p_numerators[1:], p_denominators[1:])
# Compute the geometric mean
s = [w_i * math.log(p_i) for w_i, p_i in zip(self.weights, p_n)]
gm = bp * math.exp(math.fsum(s))
return gm
def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any]) -> float:
return self._corpus_bleu([references], [candidates])
def _corpus_bleu(self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]]) -> float:
p_numerators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
p_denominators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
hyp_length_sum, ref_length_sum = self._n_gram_counter(
references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators
)
bleu_score = self._brevity_penalty_smoothing(
p_numerators=p_numerators,
p_denominators=p_denominators,
hyp_length_sum=hyp_length_sum,
ref_length_sum=ref_length_sum,
)
return bleu_score
@reinit__is_reduced
def reset(self) -> None:
if self.average == "macro":
self._sum_of_bleu = torch.tensor(0.0, dtype=torch.double, device=self._device)
self._num_sentences = 0
if self.average == "micro":
self.p_numerators = torch.zeros(self.ngrams_order + 1)
self.p_denominators = torch.zeros(self.ngrams_order + 1)
self.hyp_length_sum = 0
self.ref_length_sum = 0
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
y_pred, y = output
if self.average == "macro":
for refs, hyp in zip(y, y_pred):
self._sum_of_bleu += self._sentence_bleu(references=refs, candidates=hyp)
self._num_sentences += 1
elif self.average == "micro":
hyp_lengths, ref_lengths = self._n_gram_counter(
references=y, candidates=y_pred, p_numerators=self.p_numerators, p_denominators=self.p_denominators
)
self.hyp_length_sum += hyp_lengths
self.ref_length_sum += ref_lengths
@sync_all_reduce("_sum_of_bleu", "_num_sentences")
def _compute_macro(self) -> torch.Tensor:
if self._num_sentences == 0:
raise NotComputableError("Bleu must have at least one example before it can be computed.")
return self._sum_of_bleu / self._num_sentences
@sync_all_reduce("p_numerators", "p_denominators", "hyp_length_sum", "ref_length_sum")
def _compute_micro(self) -> float:
bleu_score = self._brevity_penalty_smoothing(
p_numerators=self.p_numerators,
p_denominators=self.p_denominators,
hyp_length_sum=self.hyp_length_sum,
ref_length_sum=self.ref_length_sum,
)
return bleu_score
    def compute(self) -> Union[torch.Tensor, float]:
if self.average == "macro":
return self._compute_macro()
elif self.average == "micro":
return self._compute_micro()
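# Illustrative sketch (not part of the original module): macro vs. micro averaging
# over a two-sentence batch. The helper name is hypothetical.
def _demo_bleu_averaging_sketch() -> None:
    y_pred = ["the cat sat on the mat".split(), "a dog in the park".split()]
    y = [
        ["the cat is on the mat".split()],
        ["there is a dog in the park".split()],
    ]
    for average in ("macro", "micro"):
        metric = Bleu(ngram=2, smooth="smooth1", average=average)
        metric.update((y_pred, y))
        # "macro" averages per-sentence BLEU scores, while "micro" accumulates
        # n-gram counts over the whole batch before computing a single score.
        print(average, metric.compute())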
|
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics import Metric
# These decorators help with distributed settings
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from ignite.metrics.nlp.utils import lcs, ngrams
__all__ = ["Rouge", "RougeN", "RougeL"]
class Score(namedtuple("Score", ["match", "candidate", "reference"])):
r"""
Computes precision and recall for given matches, candidate and reference lengths.
"""
def precision(self) -> float:
"""
Calculates precision.
"""
return self.match / self.candidate if self.candidate > 0 else 0
def recall(self) -> float:
"""
Calculates recall.
"""
return self.match / self.reference if self.reference > 0 else 0
def compute_ngram_scores(candidate: Sequence[Any], reference: Sequence[Any], n: int = 4) -> Score:
"""
    Compute the score based on ngram co-occurrence of sequences of items
Args:
candidate: candidate sequence of items
reference: reference sequence of items
n: ngram order
Returns:
        The score containing the number of ngram co-occurrences
.. versionadded:: 0.4.5
"""
# ngrams of the candidate
candidate_counter = ngrams(candidate, n)
# ngrams of the references
reference_counter = ngrams(reference, n)
    # ngram co-occurrences in the candidate and the references
match_counters = candidate_counter & reference_counter
# the score is defined using Fraction
return Score(
match=sum(match_counters.values()),
candidate=sum(candidate_counter.values()),
reference=sum(reference_counter.values()),
)
def compute_lcs_scores(candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
"""
Compute the score based on longest common subsequence of sequences of items
Args:
candidate: candidate sequence of items
reference: reference sequence of items
Returns:
The score containing the length of longest common subsequence
.. versionadded:: 0.4.5
"""
# lcs of candidate and reference
match = lcs(candidate, reference)
# the score is defined using Fraction
return Score(match=match, candidate=len(candidate), reference=len(reference))
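# Illustrative sketch (not part of the original module): n-gram and LCS based
# scores for one candidate/reference pair. The helper name is hypothetical.
def _demo_score_sketch() -> None:
    candidate = "the cat is not there".split()
    reference = "the cat is on the mat".split()
    bigram_score = compute_ngram_scores(candidate, reference, n=2)
    lcs_score = compute_lcs_scores(candidate, reference)
    # Score.precision() divides matches by the candidate length,
    # Score.recall() divides matches by the reference length.
    print("Rouge-2:", bigram_score.precision(), bigram_score.recall())
    print("Rouge-L:", lcs_score.precision(), lcs_score.recall())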
class MultiRefReducer(metaclass=ABCMeta):
r"""
Reducer interface for multi-reference
"""
@abstractmethod
def __call__(self, scores: Sequence[Score]) -> Score:
pass
class MultiRefAverageReducer(MultiRefReducer):
r"""
Reducer for averaging the scores
"""
def __call__(self, scores: Sequence[Score]) -> Score:
match = sum([score.match for score in scores])
candidate = sum([score.candidate for score in scores])
reference = sum([score.reference for score in scores])
return Score(match=match, candidate=candidate, reference=reference)
class MultiRefBestReducer(MultiRefReducer):
r"""
Reducer for selecting the best score
"""
def __call__(self, scores: Sequence[Score]) -> Score:
return max(scores, key=lambda x: x.recall())
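# Illustrative sketch (not part of the original module): the two multi-reference
# reduction strategies applied to the same per-reference scores. The helper name
# is hypothetical.
def _demo_multiref_reduction_sketch() -> None:
    scores = [Score(match=2, candidate=4, reference=5), Score(match=3, candidate=4, reference=6)]
    # "average" sums the counts over all references, while "best" keeps the
    # single score with the highest recall.
    print("average:", MultiRefAverageReducer()(scores))
    print("best:", MultiRefBestReducer()(scores))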
class _BaseRouge(Metric):
r"""
Rouge interface for Rouge-L and Rouge-N
"""
_state_dict_all_req_keys = ("_recall", "_precision", "_fmeasure", "_num_examples")
def __init__(
self,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(_BaseRouge, self).__init__(output_transform=output_transform, device=device)
self._alpha = alpha
if not 0 <= self._alpha <= 1:
raise ValueError(f"alpha must be in interval [0, 1] (got : {self._alpha})")
self._multiref = multiref
valid_multiref = ["best", "average"]
if self._multiref not in valid_multiref:
raise ValueError(f"multiref : valid values are {valid_multiref} (got : {self._multiref})")
        self._multiref_reducer = self._get_multiref_reducer()
def _get_multiref_reducer(self) -> MultiRefReducer:
if self._multiref == "average":
return MultiRefAverageReducer()
return MultiRefBestReducer()
@reinit__is_reduced
def reset(self) -> None:
self._recall = 0.0
self._precision = 0.0
self._fmeasure = 0.0
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
candidates, references = output
for _candidate, _reference in zip(candidates, references):
multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref) for _ref in _reference]
            score = self._multiref_reducer(multiref_scores)
precision = score.precision()
recall = score.recall()
self._precision += precision
self._recall += recall
precision_recall = precision * recall
if precision_recall > 0: # avoid zero division
self._fmeasure += precision_recall / ((1 - self._alpha) * precision + self._alpha * recall)
self._num_examples += 1
@sync_all_reduce("_precision", "_recall", "_fmeasure", "_num_examples")
def compute(self) -> Mapping:
if self._num_examples == 0:
raise NotComputableError("Rouge metric must have at least one example before be computed")
return {
f"{self._metric_name()}-P": float(self._precision / self._num_examples),
f"{self._metric_name()}-R": float(self._recall / self._num_examples),
f"{self._metric_name()}-F": float(self._fmeasure / self._num_examples),
}
@abstractmethod
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
pass
@abstractmethod
def _metric_name(self) -> str:
pass
class RougeN(_BaseRouge):
r"""Calculates the Rouge-N score.
    Rouge-N is based on the n-gram co-occurrences of candidates and references.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
    - `y` (list(list(list(str)))) must be a list of sequences of tokens.
Args:
ngram: ngram order (default: 4).
multiref: reduces scores for multi references. Valid values are "best" and "average"
(default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
        For more information on how the metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import RougeN
m = RougeN(ngram=2, multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
.. versionadded:: 0.4.5
"""
def __init__(
self,
ngram: int = 4,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(RougeN, self).__init__(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
self._ngram = ngram
if self._ngram < 1:
raise ValueError(f"ngram order must be greater than zero (got : {self._ngram})")
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
return compute_ngram_scores(candidate=candidate, reference=reference, n=self._ngram)
def _metric_name(self) -> str:
return f"Rouge-{self._ngram}"
class RougeL(_BaseRouge):
r"""Calculates the Rouge-L score.
The Rouge-L is based on the length of the longest common subsequence of candidates and references.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
    - `y` (list(list(list(str)))) must be a list of sequences of tokens.
Args:
multiref: reduces scores for multi references. Valid values are "best" and "average" (default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
        For more information on how the metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import RougeL
m = RougeL(multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5}
.. versionadded:: 0.4.5
"""
def __init__(
self,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(RougeL, self).__init__(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
return compute_lcs_scores(candidate=candidate, reference=reference)
def _metric_name(self) -> str:
return "Rouge-L"
class Rouge(Metric):
r"""Calculates the Rouge score for multiples Rouge-N and Rouge-L metrics.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
    - `y` (list(list(list(str)))) must be a list of sequences of tokens.
Args:
variants: set of metrics computed. Valid inputs are "L" and integer 1 <= n <= 9.
multiref: reduces scores for multi references. Valid values are "best" and "average" (default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
        For more information on how the metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import Rouge
m = Rouge(variants=["L", 2], multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5, 'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
        ``update`` method has changed and now works on a batch of inputs.
"""
def __init__(
self,
variants: Optional[Sequence[Union[str, int]]] = None,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if variants is None or len(variants) == 0:
variants = [1, 2, 4, "L"]
self.internal_metrics: List[_BaseRouge] = []
for m in variants:
variant: Optional[_BaseRouge] = None
if isinstance(m, str) and m == "L":
variant = RougeL(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
elif isinstance(m, int):
variant = RougeN(
ngram=m, multiref=multiref, alpha=alpha, output_transform=output_transform, device=device
)
else:
raise ValueError("variant must be 'L' or integer greater to zero")
self.internal_metrics.append(variant)
super(Rouge, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
for m in self.internal_metrics:
m.reset()
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
for m in self.internal_metrics:
m.update(output)
def compute(self) -> Mapping:
results = {}
for m in self.internal_metrics:
results.update(m.compute())
return results
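# Illustrative sketch (not part of the original module): combining several Rouge
# variants in a single metric. The helper name is hypothetical.
def _demo_rouge_sketch() -> None:
    metric = Rouge(variants=[1, 2, "L"], multiref="average", alpha=0.5)
    candidate = "the cat is not there".split()
    references = ["the cat is on the mat".split(), "there is a cat on the mat".split()]
    metric.update(([candidate], [references]))
    # One dictionary with precision/recall/F entries per requested variant.
    print(metric.compute())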
|
from ignite.metrics.nlp.bleu import Bleu
from ignite.metrics.nlp.rouge import Rouge, RougeL, RougeN
__all__ = [
"Bleu",
"Rouge",
"RougeN",
"RougeL",
]
|
from collections import Counter
from typing import Any, Sequence, Tuple
__all__ = ["ngrams", "lcs", "modified_precision"]
def ngrams(sequence: Sequence[Any], n: int) -> Counter:
"""
Generate the ngrams from a sequence of items
Args:
sequence: sequence of items
n: n-gram order
Returns:
A counter of ngram objects
.. versionadded:: 0.4.5
"""
return Counter([tuple(sequence[i : i + n]) for i in range(len(sequence) - n + 1)])
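# Illustrative sketch (not part of the original module): the counter returned by
# ``ngrams`` for a short token sequence. The helper name is hypothetical.
def _demo_ngrams_sketch() -> None:
    tokens = "the cat sat on the mat".split()
    # Bigrams are stored as tuples with their occurrence counts, e.g. ('the', 'cat'): 1.
    print(ngrams(tokens, n=2))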
def lcs(seq_a: Sequence[Any], seq_b: Sequence[Any]) -> int:
"""
    Compute the length of the longest common subsequence in two sequences of items
https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
Args:
seq_a: first sequence of items
seq_b: second sequence of items
Returns:
The length of the longest common subsequence
.. versionadded:: 0.4.5
"""
m = len(seq_a)
n = len(seq_b)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
dp[i][j] = 0
elif seq_a[i - 1] == seq_b[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
return dp[m][n]
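# Illustrative sketch (not part of the original module): the LCS length computed
# by the dynamic programming above. The helper name is hypothetical.
def _demo_lcs_sketch() -> None:
    seq_a = "the cat is not there".split()
    seq_b = "the cat is on the mat".split()
    # The longest common subsequence is "the cat is", so the result is 3.
    print(lcs(seq_a, seq_b))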
def modified_precision(references: Sequence[Sequence[Any]], candidate: Any, n: int) -> Tuple[int, int]:
"""
Compute the modified precision
.. math::
        p_{n} = \frac{m_{n}}{l_{n}}
    where :math:`m_{n}` is the number of matched n-grams between translation T and its reference R, and
    :math:`l_{n}` is the total number of n-grams in the translation T.
More details can be found in `Papineni et al. 2002`__.
__ https://www.aclweb.org/anthology/P02-1040.pdf
Args:
references: list of references R
candidate: translation T
n: n-gram order
Returns:
        The clipped n-gram match count (numerator) and the total number of n-grams in the candidate (denominator)
.. versionadded:: 0.4.5
"""
# ngrams of the candidate
counts = ngrams(candidate, n)
# union of ngrams of references
max_counts: Counter = Counter()
for reference in references:
max_counts |= ngrams(reference, n)
# clipped count of the candidate and references
clipped_counts = counts & max_counts
return sum(clipped_counts.values()), sum(counts.values())
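# Illustrative sketch (not part of the original module): clipped unigram counts for
# a degenerate candidate, the classic BLEU clipping example. The helper name is
# hypothetical.
def _demo_modified_precision_sketch() -> None:
    references = ["the cat is on the mat".split(), "there is a cat on the mat".split()]
    candidate = "the the the the the the the".split()
    numerator, denominator = modified_precision(references, candidate, n=1)
    # "the" occurs 7 times in the candidate but at most 2 times in a single
    # reference, so the clipped count is 2 out of 7 candidate unigrams.
    print(numerator, denominator)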
|
from ignite.distributed.auto import *
from ignite.distributed.comp_models import native, xla
from ignite.distributed.launcher import Parallel
from ignite.distributed.utils import *
|
import socket
from contextlib import contextmanager
from functools import wraps
from typing import Any, Callable, List, Mapping, Optional, Tuple, Union
import torch
from ignite.distributed.comp_models import (
_SerialModel,
has_hvd_support,
has_native_dist_support,
has_xla_support,
registered_computation_models,
)
from ignite.utils import setup_logger
__all__ = [
"backend",
"broadcast",
"device",
"available_backends",
"model_name",
"get_world_size",
"get_rank",
"get_local_rank",
"get_nproc_per_node",
"get_node_rank",
"get_nnodes",
"spawn",
"initialize",
"finalize",
"show_config",
"set_local_rank",
"all_reduce",
"all_gather",
"barrier",
"hostname",
"has_xla_support",
"has_native_dist_support",
"has_hvd_support",
"sync",
"registered_computation_models",
"one_rank_only",
"new_group",
"one_rank_first",
]
_model = _SerialModel()
_need_to_sync = True
def sync(temporary: bool = False) -> None:
"""Helper method to force this module to synchronize with current distributed context.
This method should be used when distributed context is manually created or destroyed.
Args:
temporary: If True, distributed model synchronization is done every call of ``idist.get_*`` methods.
This may have a negative performance impact.
"""
global _model
for comp_model_cls in registered_computation_models:
if comp_model_cls == _SerialModel:
continue
model = comp_model_cls.create_from_context()
if model is not None:
_set_model(model, temporary=temporary)
return
_model = _SerialModel()
def device() -> torch.device:
"""Returns current device according to current distributed configuration.
- `torch.device("cpu")` if no distributed configuration or torch native gloo distributed configuration
- `torch.device("cuda:local_rank")` if torch native nccl or horovod distributed configuration
- `torch.device("xla:index")` if XLA distributed configuration
Returns:
torch.device
.. versionchanged:: 0.4.2
Added Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.device()
def backend() -> Optional[str]:
"""Returns computation model's backend.
- `None` for no distributed configuration
- "nccl" or "gloo" or "mpi" for native torch distributed configuration
- "xla-tpu" for XLA distributed configuration
- "horovod" for Horovod distributed framework
Returns:
str or None
.. versionchanged:: 0.4.2
Added Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.backend()
def available_backends() -> Tuple[str, ...]:
"""Returns available backends."""
out: Tuple[str, ...] = ()
for m in registered_computation_models:
out += m.available_backends
return out
def model_name() -> str:
"""Returns distributed configuration name (given by ignite)
- `serial` for no distributed configuration
- `native-dist` for native torch distributed configuration
- `xla-dist` for XLA distributed configuration
- `horovod-dist` for Horovod distributed framework
.. versionchanged:: 0.4.2
`horovod-dist` will be returned for Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.name
def get_world_size() -> int:
"""Returns world size of current distributed configuration. Returns 1 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_world_size()
def get_rank() -> int:
"""Returns process rank within current distributed configuration. Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_rank()
def get_local_rank() -> int:
"""Returns local process rank within current distributed configuration.
Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_local_rank()
def get_nproc_per_node() -> int:
"""Returns number of processes (or tasks) per node within current distributed configuration.
Returns 1 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_nproc_per_node()
def get_nnodes() -> int:
"""Returns number of nodes within current distributed configuration.
Returns 1 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_nnodes()
def get_node_rank() -> int:
"""Returns node rank within current distributed configuration.
Returns 0 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_node_rank()
def hostname() -> str:
"""Returns host name for current process within current distributed configuration."""
return socket.gethostname()
def spawn(
backend: str,
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
**kwargs: Any,
) -> None:
"""Spawns ``nproc_per_node`` processes that run ``fn`` with ``args``/``kwargs_dict`` and initialize
distributed configuration defined by ``backend``.
Args:
backend: backend to use: `nccl`, `gloo`, `xla-tpu`, `horovod`
        fn: function to be called as the entrypoint of the spawned process.
This function must be defined at the top level of a module so it can be pickled and spawned.
This is a requirement imposed by multiprocessing. The function is called as ``fn(i, *args, **kwargs_dict)``,
where `i` is the process index and args is the passed through tuple of arguments.
args: arguments passed to `fn`.
kwargs_dict: kwargs passed to `fn`.
nproc_per_node: number of processes to spawn on a single node. Default, 1.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``nnodes`` (default, 1), ``node_rank`` (default, 0), ``master_addr``
| (default, "127.0.0.1"), ``master_port`` (default, 2222), ``init_method`` (default, "env://"),
| `timeout` to `dist.init_process_group`_ function
| and kwargs for `mp.start_processes`_ function.
- | "xla-tpu" : ``nnodes`` (default, 1), ``node_rank`` (default, 0) and kwargs to `xmp.spawn`_ function.
- | "horovod": ``hosts`` (default, None) and other kwargs to `hvd_run`_ function. Arguments ``nnodes=1``
| and ``node_rank=0`` are tolerated and ignored, otherwise an exception is raised.
Examples:
1) Launch single node multi-GPU training using torch native distributed framework
.. code-block:: python
# >>> python main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c, d=12):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == 4
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
idist.spawn("nccl", train_fn, args=(a, b, c), kwargs_dict={"d": 23}, nproc_per_node=4)
2) Launch multi-node multi-GPU training using torch native distributed framework
.. code-block:: python
# >>> (node 0): python main.py --node_rank=0 --nnodes=8 --master_addr=master --master_port=2222
# >>> (node 1): python main.py --node_rank=1 --nnodes=8 --master_addr=master --master_port=2222
# >>> ...
# >>> (node 7): python main.py --node_rank=7 --nnodes=8 --master_addr=master --master_port=2222
# main.py
import torch
import ignite.distributed as idist
def train_fn(local_rank, nnodes, nproc_per_node):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == nnodes * nproc_per_node
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
idist.spawn(
"nccl",
train_fn,
args=(nnodes, nproc_per_node),
nproc_per_node=nproc_per_node,
nnodes=nnodes,
node_rank=node_rank,
master_addr=master_addr,
master_port=master_port
)
3) Launch single node multi-TPU training (for example on Google Colab) using PyTorch/XLA
.. code-block:: python
# >>> python main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c, d=12):
import torch_xla.core.xla_model as xm
assert xm.get_world_size() == 8
device = idist.device()
assert "xla" in device.type
idist.spawn("xla-tpu", train_fn, args=(a, b, c), kwargs_dict={"d": 23}, nproc_per_node=8)
.. _dist.init_process_group: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. _mp.start_processes: https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn
.. _xmp.spawn: https://pytorch.org/xla/release/1.6/index.html#torch_xla.distributed.xla_multiprocessing.spawn
.. _hvd_run: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.run
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
"""
_assert_backend(backend)
if kwargs_dict is None:
kwargs_dict = {}
for comp_model_cls in registered_computation_models:
if backend not in comp_model_cls.available_backends:
continue
comp_model_cls.spawn(
fn, args=args, kwargs_dict=kwargs_dict, nproc_per_node=nproc_per_node, backend=backend, **kwargs
)
def all_reduce(
tensor: Union[torch.Tensor, float], op: str = "SUM", group: Optional[Union[Any, List[int]]] = None
) -> Union[torch.Tensor, float]:
"""Helper method to perform all reduce operation.
Args:
tensor: tensor or number to collect across participating processes.
op: reduction operation, "SUM" by default. Possible values: "SUM", "PRODUCT", "MIN", "MAX", "AND", "OR".
Horovod backend supports only "SUM", "AVERAGE", "ADASUM", "MIN", "MAX", "PRODUCT".
group: list of integer or the process group for each backend. If None, the default process group will be used.
Returns:
torch.Tensor or number
.. versionchanged:: 0.4.11
added ``group``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
if isinstance(group, list) and all(isinstance(item, int) for item in group):
group = _model.new_group(group)
return _model.all_reduce(tensor, op, group=group)
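# Illustrative sketch (not part of the original module): summing a per-process value
# across all participating processes with ``all_reduce``. In a non-distributed run
# this is a no-op and the input is returned unchanged. The helper name is hypothetical.
def _demo_all_reduce_sketch() -> None:
    value = torch.tensor([float(get_rank() + 1)])
    # With 4 processes the result is tensor([10.]) on every rank (1 + 2 + 3 + 4).
    print(get_rank(), all_reduce(value, op="SUM"))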
def all_gather(
tensor: Union[torch.Tensor, float, str], group: Optional[Union[Any, List[int]]] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
"""Helper method to perform all gather operation.
Args:
tensor: tensor or number or str to collect across participating processes. If tensor, it should have the
same shape across processes.
group: list of integer or the process group for each backend. If None, the default process group will be used.
Returns:
If input is a tensor, returns a torch.Tensor of shape ``(world_size * tensor.shape[0], tensor.shape[1], ...)``.
If input is a number, a torch.Tensor of shape ``(world_size, )`` is returned and finally a list of strings
is returned if input is a string. If current process does not belong to `group`, the very ``tensor`` is
returned.
.. versionchanged:: 0.4.11
added ``group``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
if isinstance(group, list) and all(isinstance(item, int) for item in group):
group = _model.new_group(group)
return _model.all_gather(tensor, group=group)
def broadcast(
tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
"""Helper method to perform broadcast operation.
Args:
tensor: tensor or number or str to broadcast to participating processes.
Make sure to respect data type of torch tensor input for all processes, otherwise execution will crash.
Can use None for non-source data with ``safe_mode=True``.
src: source rank. Default, 0.
        safe_mode: if True, non-source input data can be ``None`` or anything (will be discarded), otherwise data type
            of the input ``tensor`` should be respected for all processes. Please, keep in mind, this mode works
            only for dense tensors as source input if a tensor is provided. It also leads to some additional
collectives before the broadcast, making it slower than without using this mode. Default, False.
Returns:
torch.Tensor or string or number
Examples:
.. code-block:: python
y = None
if idist.get_rank() == 0:
t1 = torch.rand(4, 5, 6, device=idist.device())
s1 = "abc"
x = 12.3456
y = torch.rand(1, 2, 3, device=idist.device())
else:
t1 = torch.empty(4, 5, 6, device=idist.device())
s1 = ""
x = 0.0
# Broadcast tensor t1 from rank 0 to all processes
t1 = idist.broadcast(t1, src=0)
assert isinstance(t1, torch.Tensor)
# Broadcast string s1 from rank 0 to all processes
s1 = idist.broadcast(s1, src=0)
# >>> s1 = "abc"
# Broadcast float number x from rank 0 to all processes
x = idist.broadcast(x, src=0)
# >>> x = 12.3456
# Broadcast any of those types from rank 0,
# but other ranks do not define the placeholder
y = idist.broadcast(y, src=0, safe_mode=True)
assert isinstance(y, torch.Tensor)
.. versionadded:: 0.4.2
.. versionchanged:: 0.4.5
added ``safe_mode``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.broadcast(tensor, src=src, safe_mode=safe_mode)
def barrier() -> None:
"""Helper method to synchronize all processes."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
_model.barrier()
def new_group(ranks: List[int], **kwargs: Any) -> Any:
"""Helper method to make group for each backend from ranks.
Args:
ranks: subset of ranks to be grouped.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``backend (=None)``, ``pg_options (=None)``.
Examples:
Launch single node multi-GPU training with ``torchrun`` utility.
.. code-block:: python
import ignite.distributed as idist
ranks = [0, 1]
group = idist.new_group(ranks)
.. versionadded:: 0.4.11
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.new_group(ranks, **kwargs)
def set_local_rank(index: int) -> None:
"""Method to hint the local rank in case if torch native distributed context is created by user
without using :meth:`~ignite.distributed.utils.initialize` or :meth:`~ignite.distributed.utils.spawn`.
Args:
index: local rank or current process index
Examples:
User set up torch native distributed process group
.. code-block:: python
import ignite.distributed as idist
def run(local_rank, *args, **kwargs):
idist.set_local_rank(local_rank)
# ...
dist.init_process_group(**dist_info)
# ...
"""
from ignite.distributed.comp_models.base import ComputationModel
ComputationModel._ext_local_rank = index
def _set_model(model: Any, temporary: bool = False) -> None:
global _model, _need_to_sync
_model = model
_need_to_sync = True
if not isinstance(_model, _SerialModel) and not temporary:
_need_to_sync = False
def _assert_backend(backend: str) -> None:
backends = available_backends()
if backend not in backends:
raise ValueError(f"Backend should be one of '{backends}'")
def initialize(backend: str, **kwargs: Any) -> None:
"""Initializes distributed configuration according to provided ``backend``
Args:
backend: backend: `nccl`, `gloo`, `xla-tpu`, `horovod`.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``timeout(=timedelta(minutes=30))``, ``init_method(=None)``,
| ``rank(=None)``, ``world_size(=None)``.
| By default, ``init_method`` will be "env://". See more info about parameters: `torch_init`_.
- | "horovod" : comm(=None), more info: `hvd_init`_.
Examples:
Launch single node multi-GPU training with ``torchrun`` utility.
.. code-block:: python
# >>> torchrun --nproc_per_node=4 main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == 4
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
backend = "nccl" # or "gloo" or "horovod" or "xla-tpu"
idist.initialize(backend)
# or for torch native distributed on Windows:
# idist.initialize("nccl", init_method="file://tmp/shared")
local_rank = idist.get_local_rank()
train_fn(local_rank, a, b, c)
idist.finalize()
.. _torch_init: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. _hvd_init: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.torch
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
.. versionchanged:: 0.4.5
``kwargs`` now accepts ``init_method``, ``rank``, ``world_size`` for PyTorch native distributed backend.
"""
if not (has_xla_support or has_native_dist_support or has_hvd_support):
# nothing to do => serial model
# maybe warn about this
return
_assert_backend(backend)
for comp_model_cls in registered_computation_models:
if backend not in comp_model_cls.available_backends:
continue
_set_model(comp_model_cls(backend, **kwargs))
def finalize() -> None:
"""Finalizes distributed configuration. For example, in case of native pytorch distributed configuration,
it calls ``dist.destroy_process_group()``.
"""
_model.finalize()
_set_model(_SerialModel())
def show_config() -> None:
"""Helper method to display distributed configuration via ``logging``."""
# setup parallel logger
logger = setup_logger(__name__)
logger.info(f"distributed configuration: {model_name()}")
logger.info(f"backend: {backend()}")
logger.info(f"device: {device().type}")
logger.info(f"hostname: {hostname()}")
logger.info(f"world size: {get_world_size()}")
logger.info(f"rank: {get_rank()}")
logger.info(f"local rank: {get_local_rank()}")
logger.info(f"num processes per_node: {get_nproc_per_node()}")
logger.info(f"num nodes: {get_nnodes()}")
logger.info(f"node rank: {get_node_rank()}")
def one_rank_only(rank: int = 0, with_barrier: bool = False) -> Callable:
"""Decorator to filter handlers wrt a rank number
Args:
rank: rank number of the handler (default: 0).
with_barrier: synchronisation with a barrier (default: False).
Examples:
.. code-block:: python
engine = ...
@engine.on(...)
@one_rank_only() # means @one_rank_only(rank=0)
def some_handler(_):
...
@engine.on(...)
@one_rank_only(rank=1)
def some_handler(_):
...
"""
def _one_rank_only(func: Callable) -> Callable:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
ret = None
if get_rank() == rank:
ret = func(*args, **kwargs)
if with_barrier:
barrier()
return ret
return wrapper
return _one_rank_only
@contextmanager
def one_rank_first(rank: int = 0, local: bool = False) -> Any:
"""Context manager that ensures a specific rank runs first before others in a distributed
environment.
Args:
rank: rank of the process that should execute the code
block inside the context manager first. Default, 0.
local: flag to specify local rank or global rank.
If True ``rank`` argument will define a local rank to run first.
Default, False
Examples:
.. code-block:: python
def download_dataset():
...
with idist.one_rank_first():
ds = download_dataset()
dp = ds[0]
.. versionadded:: 0.5.0
"""
current_rank = get_local_rank() if local else get_rank()
size = get_nproc_per_node() if local else get_world_size()
if rank >= size or rank < 0:
raise ValueError(f"rank should be between 0 and {size - 1}, but given {rank}")
if current_rank != rank:
barrier()
yield
if current_rank == rank:
barrier()
|
from typing import Any, Callable, Dict, Optional
from ignite.distributed import utils as idist
from ignite.utils import setup_logger
__all__ = [
"Parallel",
]
class Parallel:
"""Distributed launcher context manager to simplify distributed configuration setup for multiple backends:
- backends from native torch distributed configuration: "nccl", "gloo" and "mpi" (if available)
- XLA on TPUs via `pytorch/xla <https://github.com/pytorch/xla>`_ (if installed)
- using `Horovod distributed framework <https://horovod.readthedocs.io>`_ (if installed)
Namely, it can:
1) Spawn ``nproc_per_node`` child processes and initialize a processing group according to
provided ``backend`` (useful for standalone scripts).
2) Only initialize a processing group given the ``backend``
(useful with tools like `torchrun`_, `horovodrun`_, etc).
Args:
backend: backend to use: `nccl`, `gloo`, `xla-tpu`, `horovod`. If None, no distributed
configuration.
nproc_per_node: optional argument, number of processes per
node to specify. If not None, :meth:`~ignite.distributed.launcher.Parallel.run`
will spawn ``nproc_per_node`` processes that run input function with its arguments.
nnodes: optional argument, number of nodes participating in distributed configuration.
If not None, :meth:`~ignite.distributed.launcher.Parallel.run` will spawn ``nproc_per_node``
processes that run input function with its arguments. Total world size is `nproc_per_node * nnodes`.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
node_rank: optional argument, current machine index. Mandatory argument if ``nnodes`` is
specified and larger than one.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
master_addr: optional argument, master node TCP/IP address for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``nnodes`` is specified and larger than one.
master_port: optional argument, master node port for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``master_addr`` is specified.
init_method: optional argument to specify processing group initialization method for torch native
backends (`nccl`, `gloo`). Default, "env://".
See more info: `dist.init_process_group`_.
spawn_kwargs: kwargs to ``idist.spawn`` function.
Examples:
1) Single node or Multi-node, Multi-GPU training launched with `torchrun` or `horovodrun`_
tools
Single node option with 4 GPUs
.. code-block:: bash
torchrun --nproc_per_node=4 main.py
# or if installed horovod
horovodrun -np=4 python main.py
Multi-node option : 2 nodes with 8 GPUs each
.. code-block:: bash
## node 0
torchrun --nnodes=2 --node_rank=0 --master_addr=master --master_port=3344 \
--nproc_per_node=8 main.py
# or if installed horovod
horovodrun -np 16 -H hostname1:8,hostname2:8 python main.py
## node 1
torchrun --nnodes=2 --node_rank=1 --master_addr=master --master_port=3344 \
--nproc_per_node=8 main.py
User code is the same for both options:
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
config = {"key": "value"}
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
2) Single node, Multi-GPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
# no "init_method" was specified , "env://" will be used
with idist.Parallel(backend=backend, nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``file://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='file:///d:/tmp/some_file', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``tcp://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='tcp://10.1.1.20:23456', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
3) Single node, Multi-TPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
config = {"key": "value"}
with idist.Parallel(backend="xla-tpu", nproc_per_node=8) as parallel:
parallel.run(training, config, a=1, b=2)
4) Multi-node, Multi-GPU training launched with `python`. For example, 2 nodes with 8 GPUs:
Using torch native distributed framework:
.. code-block:: bash
# node 0
python main.py --node_rank=0
# node 1
python main.py --node_rank=1
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
dist_config = {
"nproc_per_node": 8,
"nnodes": 2,
"node_rank": args.node_rank,
"master_addr": "master",
"master_port": 15000
}
config = {"key": "value"}
with idist.Parallel(backend="nccl", **dist_config) as parallel:
parallel.run(training, config, a=1, b=2)
.. _torchrun: https://pytorch.org/docs/stable/elastic/run.html#launcher-api
.. _horovodrun: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.run
.. _dist.init_process_group: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
.. versionchanged:: 0.4.5
``init_method`` added.
"""
def __init__(
self,
backend: Optional[str] = None,
nproc_per_node: Optional[int] = None,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> None:
if backend is not None:
if backend not in idist.available_backends():
raise ValueError(f"Unknown backend '{backend}'. Available backends: {idist.available_backends()}")
else:
arg_names = ["nproc_per_node", "nnodes", "node_rank", "master_addr", "master_port"]
arg_values = [nproc_per_node, nnodes, node_rank, master_addr, master_port]
for name, value in zip(arg_names, arg_values):
if value is not None:
raise ValueError(f"If backend is None, argument '{name}' should be also None, but given {value}")
self.backend = backend
self._spawn_params = None
self.init_method = init_method
if self.backend is not None:
if nproc_per_node is not None:
self._spawn_params = self._setup_spawn_params(
nproc_per_node, nnodes, node_rank, master_addr, master_port, init_method, **spawn_kwargs
)
# The logger will be setup after the idist.initialize() call
self._logger = None
@staticmethod
def _setup_spawn_params(
nproc_per_node: int,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> Dict:
if nproc_per_node < 1:
raise ValueError(f"Argument nproc_per_node should positive, but given {nproc_per_node}")
if nnodes is None:
nnodes = 1
if nnodes < 1:
raise ValueError(f"Argument nnodes should positive, but given {nnodes}")
if node_rank is None:
if nnodes > 1:
raise ValueError("If number of nodes larger than one, arguments node_rank should be given")
node_rank = 0
if node_rank >= nnodes or node_rank < 0:
raise ValueError(f"Argument node_rank should be between 0 and {nnodes - 1}, but given {node_rank}")
if nnodes > 1 and (master_addr is None or master_port is None) and init_method is None:
raise ValueError(
"If number of nodes larger than one, arguments master_addr and master_port or init_method "
f"should be specified, but given master_addr={master_addr}, master_port={master_port} and "
f"init_method={init_method}."
)
params = {
"nproc_per_node": nproc_per_node,
"nnodes": nnodes,
"node_rank": node_rank,
"master_addr": master_addr,
"master_port": master_port,
"init_method": init_method,
}
params.update(spawn_kwargs)
return {k: v for k, v in params.items() if v is not None}
def run(self, func: Callable, *args: Any, **kwargs: Any) -> None:
"""Execute ``func`` with provided arguments in distributed context.
Args:
func: function to execute. First argument of the function should be `local_rank` - local process
index.
args: positional arguments of ``func`` (without `local_rank`).
kwargs: keyword arguments of ``func``.
Examples:
.. code-block:: python
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
config = {"key": "value"}
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
"""
if self._spawn_params is not None and self.backend is not None:
self._logger.info( # type: ignore[attr-defined]
f"Spawn function '{func}' in {self._spawn_params['nproc_per_node']} processes"
)
idist.spawn(self.backend, func, args=args, kwargs_dict=kwargs, **self._spawn_params)
else:
self._logger.info(f"- Run '{func}' in {idist.get_world_size()} processes") # type: ignore[attr-defined]
local_rank = idist.get_local_rank()
func(local_rank, *args, **kwargs)
self._logger.info("End of run") # type: ignore[attr-defined]
def __enter__(self) -> "Parallel":
if self.backend is not None and self._spawn_params is None:
idist.initialize(self.backend, init_method=self.init_method)
# The logger can be setup from now since idist.initialize() has been called (if needed)
self._logger = setup_logger(__name__ + "." + self.__class__.__name__) # type: ignore[assignment]
if self.backend is not None:
if self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Initialized processing group with backend: '{self.backend}'"
)
else:
self._logger.info( # type: ignore[attr-defined]
f"Initialized distributed launcher with backend: '{self.backend}'"
)
msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None])
self._logger.info(f"- Parameters to spawn processes: \n\t{msg}") # type: ignore[attr-defined]
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
if (self.backend is not None) and self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Finalized processing group with backend: '{self.backend}'"
)
idist.finalize()
|
import warnings
from typing import Any, Iterator, List, Optional, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import Sampler
from ignite.distributed import utils as idist
from ignite.distributed.comp_models import horovod as idist_hvd, native as idist_native, xla as idist_xla
from ignite.utils import setup_logger
__all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"]
def auto_dataloader(dataset: Dataset, **kwargs: Any) -> Union[DataLoader, "_MpDeviceLoader"]:
"""Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
Internally, we create a dataloader with provided kwargs while applying the following updates:
    - batch size is scaled by world size: ``batch_size // world_size`` if larger than or equal to world size.
    - number of workers is scaled by number of local processes: ``num_workers / nprocs`` (rounded up) if larger than
      or equal to the number of processes per node.
- if no sampler provided by user, a `torch DistributedSampler`_ is setup.
- if a `torch DistributedSampler`_ is provided by user, it is used without wrapping it.
- if another sampler is provided, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.
- if the default device is 'cuda', `pin_memory` is automatically set to `True`.
.. warning::
Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch
sampler is compatible with distributed configuration.
Args:
dataset: input torch dataset. If input dataset is `torch IterableDataset`_ then dataloader will be
created without any distributed sampling. Please, make sure that the dataset itself produces
different data on different ranks.
kwargs: keyword arguments for `torch DataLoader`_.
Returns:
`torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices
Examples:
.. code-block:: python
            import ignite.distributed as idist
train_loader = idist.auto_dataloader(
train_dataset,
batch_size=32,
num_workers=4,
shuffle=True,
pin_memory="cuda" in idist.device().type,
drop_last=True,
)
.. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
.. _XLA MpDeviceLoader:
https://pytorch.org/xla/release/2.0/index.html#running-on-multiple-xla-devices-with-multi-processing
.. _torch DistributedSampler:
https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
.. _torch IterableDataset: https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset
"""
rank = idist.get_rank()
world_size = idist.get_world_size()
logger = setup_logger(__name__ + ".auto_dataloader")
if world_size > 1:
if "batch_size" in kwargs and kwargs["batch_size"] >= world_size:
kwargs["batch_size"] //= world_size
nproc = idist.get_nproc_per_node()
if "num_workers" in kwargs and kwargs["num_workers"] >= nproc:
kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc
if "batch_sampler" not in kwargs:
if isinstance(dataset, IterableDataset):
logger.info(
"Found iterable dataset, dataloader will be created without any distributed sampling. "
"Please, make sure that the dataset itself produces different data on different ranks."
)
else:
sampler: Optional[Union[DistributedProxySampler, DistributedSampler, Sampler]]
sampler = kwargs.get("sampler", None)
if isinstance(sampler, DistributedSampler):
if sampler.rank != rank:
warnings.warn(f"Found distributed sampler with rank={sampler.rank}, but process rank is {rank}")
if sampler.num_replicas != world_size:
warnings.warn(
f"Found distributed sampler with num_replicas={sampler.num_replicas}, "
f"but world size is {world_size}"
)
elif sampler is None:
# removes "shuffle" from kwargs if sampler is used
shuffle = kwargs.pop("shuffle", True)
sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
else:
sampler = DistributedProxySampler(sampler, num_replicas=world_size, rank=rank)
kwargs["sampler"] = sampler
else:
warnings.warn(
"Found batch_sampler in provided kwargs. Please, make sure that it is compatible "
"with distributed configuration"
)
if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False):
# TODO: How about XLA GPU ?
warnings.warn(
"Found incompatible options: xla support and pin_memory args equal True. "
"Argument `pin_memory=False` will be used to construct data loader."
)
kwargs["pin_memory"] = False
else:
kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type)
logger.info(f"Use data loader kwargs for dataset '{repr(dataset)[:20].strip()}': \n\t{kwargs}")
dataloader = DataLoader(dataset, **kwargs)
if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:
logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA")
mp_device_loader_cls = _MpDeviceLoader
try:
from torch_xla.distributed.parallel_loader import MpDeviceLoader
mp_device_loader_cls = MpDeviceLoader
except ImportError:
pass
mp_dataloader = mp_device_loader_cls(dataloader, idist.device())
mp_dataloader.sampler = dataloader.sampler # type: ignore[attr-defined]
return mp_dataloader
return dataloader
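# Illustrative sketch (not part of the original module): how per-process dataloader
# kwargs are derived from user-provided values under a 4-process configuration,
# mirroring the scaling rules applied by ``auto_dataloader`` above. The helper name
# and the numbers are hypothetical.
def _demo_auto_dataloader_scaling_sketch() -> None:
    world_size, nproc_per_node = 4, 4
    batch_size, num_workers = 32, 6
    if batch_size >= world_size:
        batch_size //= world_size  # 32 -> 8 samples per process
    if num_workers >= nproc_per_node:
        num_workers = (num_workers + nproc_per_node - 1) // nproc_per_node  # ceil division: 6 -> 2
    print(batch_size, num_workers)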
def auto_model(model: nn.Module, sync_bn: bool = False, **kwargs: Any) -> nn.Module:
"""Helper method to adapt provided model for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
    Internally, we perform the following:
- send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.
- wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.
- wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
- broadcast the initial variable states from rank 0 to all other processes if Horovod distributed framework is used.
Args:
model: model to adapt.
sync_bn: if True, applies `torch convert_sync_batchnorm`_ to the model for native torch
distributed only. Default, False. Note, if using Nvidia/Apex, batchnorm conversion should be
applied before calling ``amp.initialize``.
kwargs: kwargs to model's wrapping class: `torch DistributedDataParallel`_ or `torch DataParallel`_
if applicable. Please, make sure to use acceptable kwargs for given backend.
Returns:
torch.nn.Module
Examples:
.. code-block:: python
import ignite.distributed as idist
model = idist.auto_model(model)
In addition, with NVidia/Apex it can be used in the following way:
.. code-block:: python
import ignite.distributed as idist
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
model = idist.auto_model(model)
.. _torch DistributedDataParallel: https://pytorch.org/docs/stable/generated/torch.nn.parallel.
DistributedDataParallel.html
.. _torch DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
.. _torch convert_sync_batchnorm: https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html#
torch.nn.SyncBatchNorm.convert_sync_batchnorm
.. versionchanged:: 0.4.2
- Added Horovod distributed framework.
- Added ``sync_bn`` argument.
.. versionchanged:: 0.4.3
Added kwargs to ``idist.auto_model``.
"""
logger = setup_logger(__name__ + ".auto_model")
# Move the model's parameters to the current device if they are not already there
device = idist.device()
if not all([p.device == device for p in model.parameters()]):
model.to(device)
# distributed data parallel model
if idist.get_world_size() > 1:
bnd = idist.backend()
if idist.has_native_dist_support and bnd in (idist_native.NCCL, idist_native.GLOO, idist_native.MPI):
if sync_bn:
logger.info("Convert batch norm to sync batch norm")
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
if torch.cuda.is_available():
if "device_ids" in kwargs:
raise ValueError(f"Argument kwargs should not contain 'device_ids', but got {kwargs}")
lrank = idist.get_local_rank()
logger.info(f"Apply torch DistributedDataParallel on model, device id: {lrank}")
kwargs["device_ids"] = [
lrank,
]
else:
logger.info("Apply torch DistributedDataParallel on model")
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
elif idist.has_hvd_support and bnd == idist_hvd.HOROVOD:
import horovod.torch as hvd
logger.info("Broadcast the initial variable states from rank 0 to all other processes")
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
# not distributed, but multiple GPUs are available, so use DataParallel
elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type:
logger.info("Apply torch DataParallel on model")
model = torch.nn.parallel.DataParallel(model, **kwargs)
return model
def auto_optim(optimizer: Optimizer, **kwargs: Any) -> Optimizer:
"""Helper method to adapt optimizer for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
Internally, this method is no-op for non-distributed and torch native distributed configuration.
For XLA distributed configuration, we create a new class that inherits from provided optimizer.
The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.
For Horovod distributed configuration, optimizer is wrapped with Horovod Distributed Optimizer and
its state is broadcasted from rank 0 to all other processes.
Args:
optimizer: input torch optimizer
kwargs: kwargs to Horovod backend's DistributedOptimizer.
Returns:
Optimizer
Examples:
.. code-block:: python
import ignite.distributed as idist
optimizer = idist.auto_optim(optimizer)
.. _xm.optimizer_step: https://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step
.. versionchanged:: 0.4.2
Added Horovod distributed optimizer.
.. versionchanged:: 0.4.7
Added kwargs to ``idist.auto_optim``.
"""
bnd = idist.backend()
if idist.has_xla_support and bnd == idist_xla.XLA_TPU:
cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))
return cls(optimizer)
if idist.has_hvd_support and bnd == idist_hvd.HOROVOD:
import horovod.torch as hvd
optimizer = hvd.DistributedOptimizer(optimizer, **kwargs)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
return optimizer
return optimizer
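# Illustrative sketch (not part of the library API) of the dynamic subclassing
# pattern used above for XLA: a new class is created at runtime that inherits
# from the optimizer's own class and overrides `step()`. Here the override just
# logs and then delegates to the regular SGD update, so the sketch runs without
# torch_xla. `_example_dynamic_optimizer_subclass` is a hypothetical helper.
def _example_dynamic_optimizer_subclass() -> None:
    from torch.optim import SGD

    def step(self: Optimizer, closure: Any = None) -> Any:
        # stand-in for xm.optimizer_step(self.wrapped_optimizer, barrier=True)
        print("custom step called")
        return SGD.step(self, closure)  # tied to SGD only for this example

    model = nn.Linear(2, 2)
    base_optimizer = SGD(model.parameters(), lr=0.1)
    cls = type(base_optimizer.__class__.__name__, (base_optimizer.__class__,), {"step": step})
    optimizer = cls(model.parameters(), lr=0.1)
    model(torch.ones(1, 2)).sum().backward()
    optimizer.step()  # prints "custom step called", then applies the usual SGD update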
class DistributedProxySampler(DistributedSampler):
"""Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.
Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407
Args:
sampler: Input torch data sampler.
num_replicas: Number of processes participating in distributed training.
rank: Rank of the current process within ``num_replicas``.
.. note::
Input sampler is assumed to have a constant size.
"""
def __init__(self, sampler: Sampler, num_replicas: Optional[int] = None, rank: Optional[int] = None) -> None:
if not isinstance(sampler, Sampler):
raise TypeError(f"Argument sampler should be instance of torch Sampler, but given: {type(sampler)}")
if isinstance(sampler, DistributedSampler):
raise TypeError("Argument sampler must not be a distributed sampler already")
if not hasattr(sampler, "__len__"):
raise TypeError("Argument sampler should have length")
super(DistributedProxySampler, self).__init__(
sampler, num_replicas=num_replicas, rank=rank, shuffle=False # type: ignore[arg-type]
)
self.sampler = sampler
def __iter__(self) -> Iterator:
# deterministically shuffle based on epoch
torch.manual_seed(self.epoch)
indices: List = []
while len(indices) < self.total_size:
indices += list(self.sampler)
if len(indices) > self.total_size:
indices = indices[: self.total_size]
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
if len(indices) != self.num_samples:
raise RuntimeError(f"{len(indices)} vs {self.num_samples}")
return iter(indices)
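# Illustrative sketch (not part of the library API): wrapping a custom sampler
# with `DistributedProxySampler` so that each of `num_replicas` processes sees a
# disjoint slice of the sampled indices. The rank/num_replicas values below are
# fixed explicitly for illustration; `auto_dataloader` derives them from the
# current distributed context.
def _example_distributed_proxy_sampler() -> None:
    from torch.utils.data import WeightedRandomSampler

    base_sampler = WeightedRandomSampler(weights=torch.ones(20), num_samples=20)
    proxy = DistributedProxySampler(base_sampler, num_replicas=4, rank=1)
    proxy.set_epoch(0)
    print(len(list(proxy)))  # 20 / 4 == 5 indices for this rank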
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
from torch_xla.distributed.parallel_loader import ParallelLoader
class _MpDeviceLoader:
# https://github.com/pytorch/xla/pull/2117
# Fallback copied from pytorch/xla, used if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available
def __init__(self, loader: Any, device: torch.device, **kwargs: Any) -> None:
self._loader = loader
self._device = device
self._parallel_loader_kwargs = kwargs
def __iter__(self) -> Iterator:
parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)
return parallel_loader.per_device_loader(self._device)
def __len__(self) -> int:
return len(self._loader)
class _XLADistributedOptimizer(Optimizer):
def __init__(self, optimizer: Optimizer) -> None:
super(self.__class__, self).__init__(optimizer.param_groups) # type: ignore[call-arg]
self.wrapped_optimizer = optimizer
def step(self, closure: Any = None) -> Any:
xm.optimizer_step(self.wrapped_optimizer, barrier=True)
|
import warnings
from typing import Any, Callable, cast, List, Mapping, Optional, Tuple
import torch
from ignite.distributed.comp_models.base import ComputationModel
try:
import horovod.torch as hvd
try:
# old API
from horovod.run.runner import run as hvd_mp_spawn
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run as hvd_mp_spawn
has_hvd_support = True
except ImportError:
has_hvd_support = False
if has_hvd_support:
HOROVOD = "horovod"
class _HorovodDistModel(ComputationModel):
"""Private class for `Horovod <https://horovod.readthedocs.io/en/stable/>`_ distributed computation model."""
name = "horovod-dist"
available_backends = (HOROVOD,)
@staticmethod
def _get_hvd_rank() -> int:
try:
rank = hvd.rank()
except ValueError:
rank = -1
return rank
@staticmethod
def create_from_context() -> Optional["_HorovodDistModel"]:
rank = _HorovodDistModel._get_hvd_rank()
# hvd must be initialized
if not rank > -1:
return None
return _HorovodDistModel()
@staticmethod
def create_from_backend(backend: str = HOROVOD, **kwargs: Any) -> "_HorovodDistModel":
if backend not in _HorovodDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_HorovodDistModel.available_backends}'")
rank = _HorovodDistModel._get_hvd_rank()
# hvd must not be initialized
if rank > -1:
raise RuntimeError("Can not re-initialize Horovod if it is already initialized")
return _HorovodDistModel(backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any) -> None:
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_HorovodDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
else:
self._init_from_context()
def _create_from_backend(self, backend: str, **kwargs: Any) -> None:
self._backend: str = backend
comm = kwargs.get("comm", None)
hvd.init(comm=comm)
self._setup_attrs()
if torch.cuda.is_available():
torch.cuda.set_device(self.get_local_rank())
def _init_from_context(self) -> None:
self._backend = HOROVOD
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
return hvd.local_size()
def get_local_rank(self) -> int:
return hvd.local_rank()
def get_rank(self) -> int:
return hvd.rank()
def get_world_size(self) -> int:
return hvd.size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
if torch.cuda.is_available():
index = torch.cuda.current_device()
if index < self.get_local_rank():
warnings.warn(
"Current device index is less than current local rank. "
"Please, make sure to call torch.cuda.set_device(local_rank)."
)
return torch.device(f"cuda:{index}")
return torch.device("cpu")
def backend(self) -> str:
return self._backend
def finalize(self) -> None:
hvd.shutdown()
@staticmethod
def _dist_worker_task_fn(backend: str, fn: Callable, args: Tuple, kwargs_dict: Mapping) -> None:
from ignite.distributed.utils import _set_model, finalize
model = _HorovodDistModel.create_from_backend(backend)
_set_model(model)
fn(model.get_local_rank(), *args, **kwargs_dict)
finalize()
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
hosts: Optional[str] = None,
backend: str = HOROVOD,
**kwargs: Any,
) -> None:
c1 = "nnodes" in kwargs and kwargs["nnodes"] > 1
c2 = "node_rank" in kwargs and kwargs["node_rank"] > 0
if c1 or c2:
raise RuntimeError(
"For multi-node configuration, please set 'hosts' argument instead according to horovod.run API."
)
if "nnodes" in kwargs:
# Remove 'nnodes=1' as it is an unexpected keyword argument for horovod.run
del kwargs["nnodes"]
if "node_rank" in kwargs:
# Remove 'node_rank=0' as it is an unexpected keyword argument for horovod.run
del kwargs["node_rank"]
hvd_mp_spawn(
_HorovodDistModel._dist_worker_task_fn,
args=(HOROVOD, fn, args, kwargs_dict),
num_proc=nproc_per_node,
hosts=hosts,
**kwargs,
)
_reduce_op_map = {
"SUM": hvd.mpi_ops.Sum,
"AVERAGE": hvd.mpi_ops.Average,
"ADASUM": hvd.mpi_ops.Adasum,
}
_manual_reduce_op_map = {"MIN": torch.min, "MAX": torch.max, "PRODUCT": torch.prod}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if group is not None:
raise NotImplementedError("all_reduce with group for horovod is not implemented")
if op in self._manual_reduce_op_map:
op_fn = self._manual_reduce_op_map[op]
return self._do_manual_all_reduce(tensor, op_fn)
if op not in self._reduce_op_map:
raise ValueError(f"Unsupported reduction operation: '{op}'")
op = self._reduce_op_map[op]
return hvd.allreduce(tensor, op=op)
def _do_manual_all_reduce(self, tensor: torch.Tensor, op: Any) -> torch.Tensor:
# We have to unsqueeze otherwise tensors will be gathered into a single tensor
# without splitting (e.g. [1, 1, 1, 3, 3, 3] instead of [[1, 1, 1], [3, 3, 3]])
# and the reduction op won't work as expected
res = self._do_all_gather(tensor.unsqueeze(0))
reduced_res = op(res, dim=0)
if isinstance(reduced_res, torch.Tensor):
return reduced_res
# output can also be a torch.return_types.min/max namedtuple: (values, indices)
return reduced_res[0]
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
if group is not None:
raise NotImplementedError("all_gather with group for horovod is not implemented")
if tensor.ndimension() == 0:
tensor = tensor.unsqueeze(0)
return hvd.allgather(tensor)
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
if group is not None:
raise NotImplementedError("all_gather with group for horovod is not implemented")
return hvd.allgather_object(tensor)
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return hvd.ProcessSet(ranks)
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
return hvd.broadcast(tensor, root_rank=src)
def barrier(self) -> None:
# https://github.com/horovod/horovod/issues/159#issuecomment-424834603
# hvd.allreduce(torch.tensor(0, device=self.device()), name="barrier")
hvd.allreduce(torch.tensor(0, device="cpu"), name="barrier")
|
from typing import List, Tuple, Type, TYPE_CHECKING, Union
from ignite.distributed.comp_models.base import _SerialModel
from ignite.distributed.comp_models.horovod import has_hvd_support
from ignite.distributed.comp_models.native import has_native_dist_support
from ignite.distributed.comp_models.xla import has_xla_support
if TYPE_CHECKING:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.comp_models.xla import _XlaDistModel
def setup_available_computation_models() -> (
Tuple[Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]], ...]
):
models: List[Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]]] = [
_SerialModel,
]
if has_native_dist_support:
from ignite.distributed.comp_models.native import _NativeDistModel
models.append(_NativeDistModel)
if has_xla_support:
from ignite.distributed.comp_models.xla import _XlaDistModel
models.append(_XlaDistModel)
if has_hvd_support:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
models.append(_HorovodDistModel)
return tuple(models)
registered_computation_models = setup_available_computation_models()
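# Illustrative sketch (not part of the library API): probing the registered
# computation models to discover the active distributed context, mirroring what
# `ignite.distributed.utils` does internally (the serial model is the fallback).
def _example_probe_computation_models() -> None:
    active = _SerialModel()
    for model_cls in registered_computation_models:
        if model_cls is _SerialModel:
            continue
        model = model_cls.create_from_context()
        if model is not None:
            active = model
            break
    print(f"Active computation model: {active.name}")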
|
import os
import re
import subprocess
import warnings
from typing import Any, Callable, cast, Dict, List, Mapping, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from packaging.version import Version
from ignite.distributed.comp_models.base import ComputationModel
has_native_dist_support = dist.is_available()
if has_native_dist_support:
NCCL = dist.Backend.NCCL
GLOO = dist.Backend.GLOO
MPI = dist.Backend.MPI
class _NativeDistModel(ComputationModel):
"""Private class for PyTorch native distributed computation model.
Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:
- NCCL
- GLOO
- MPI
In this implementation we assume the following mapping between backend and devices:
- NCCL <-> GPU
- GLOO <-> CPU or GPU
- MPI <-> CPU
"""
name = "native-dist"
available_backends = tuple(name for name in [NCCL, GLOO, MPI] if getattr(dist, f"is_{name}_available")())
@staticmethod
def create_from_context() -> Optional["_NativeDistModel"]:
if not (dist.is_available() and dist.is_initialized()):
return None
return _NativeDistModel()
@staticmethod
def create_from_backend(
backend: str,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> "_NativeDistModel":
if backend not in _NativeDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_NativeDistModel.available_backends}'")
if dist.is_available() and dist.is_initialized():
raise RuntimeError("Can not create new distributed process group if default one is already initialized")
if init_method is None:
if world_size is not None or rank is not None:
raise ValueError("Arguments rank and world_size should be None if no init_method is provided")
else:
has_rank = rank is not None
has_ws = world_size is not None
if (has_rank or has_ws) and (not has_rank or not has_ws):
raise ValueError(f"Both rank and world_size should be provided, but given {rank} and {world_size}")
return _NativeDistModel(
backend=backend, init_method=init_method, world_size=world_size, rank=rank, **kwargs
)
def __init__(
self,
backend: Optional[str] = None,
timeout: Optional[int] = None,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> None:
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_NativeDistModel, self).__init__()
self._env_backup: Optional[Dict[str, str]] = None
self._local_rank: Optional[int] = None
self._master_port: Optional[int] = None
self._master_addr: Optional[str] = None
self._init_method: Optional[str] = None
if backend is not None:
self._create_from_backend(
backend, timeout=timeout, init_method=init_method, world_size=world_size, rank=rank, **kwargs
)
else:
self._init_from_context()
def _create_from_backend(
self,
backend: str,
timeout: Optional[int] = None,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> None:
if backend == dist.Backend.NCCL and not torch.cuda.is_available():
raise RuntimeError("Nccl backend is required but no cuda capable devices")
self._backend = backend
self.setup_env_vars(rank, world_size)
init_pg_kwargs: Dict[str, Any] = {}
if timeout is not None:
init_pg_kwargs["timeout"] = timeout
if init_method is None:
init_method = "env://"
if "env" not in init_method:
init_pg_kwargs["world_size"] = int(os.environ["WORLD_SIZE"])
init_pg_kwargs["rank"] = int(os.environ["RANK"])
self._init_method = init_method
dist.init_process_group(backend, init_method=init_method, **init_pg_kwargs)
if torch.cuda.is_available():
torch.cuda.set_device(self._local_rank)
# Call barrier after init_process_group as in
# https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
# Define device ids for NCCL to avoid warnings
# [W ProcessGroupNCCL.cpp:1569] Rank 0 using best-guess GPU 0 to perform barrier as devices used by
# this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping
# is incorrect. Specify device_ids in barrier() to force use of a particular device.
if backend == dist.Backend.NCCL and Version(torch.__version__) >= Version("1.8.0"):
device_ids = [torch.cuda.current_device()]
dist.barrier(device_ids=device_ids)
else:
# For older versions there is no device_ids arg
dist.barrier()
self._setup_attrs()
def _init_from_context(self) -> None:
self._backend = dist.get_backend()
self._identify_local_rank()
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
local_rank = self.get_local_rank()
# Create a new gloo (CPU) group to compute nproc_per_node, so that we avoid using
# a potentially misconfigured NCCL backend
gloo_group = dist.new_group(backend="gloo")
tensor = torch.tensor([local_rank + 1]).to("cpu")
dist.all_reduce(tensor, op=dist.ReduceOp.MAX, group=gloo_group)
dist.destroy_process_group(gloo_group)
return int(tensor.item())
def _get_all_hostnames(self) -> List[Tuple[str, ...]]:
import socket
device = "cpu"
if torch.cuda.is_available():
index = torch.cuda.current_device()
device = f"cuda:{index}"
hostname = socket.gethostname()
name = torch.tensor(bytearray(hostname, "utf-8")).to(device)
padded_t_name = torch.zeros(256, device=device, dtype=torch.long)
padded_t_name[: len(name)] = name
out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]
dist.all_gather(out_t_names, padded_t_name)
return [tuple(t.cpu().tolist()) for t in out_t_names]
@staticmethod
def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) -> Tuple[int, int]:
from collections import Counter
c: Counter = Counter(hostnames)
sizes = torch.tensor([0] + list(c.values()))
cumsum_sizes = torch.cumsum(sizes, dim=0)
node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()
local_rank = rank - cumsum_sizes[node_rank].item()
return int(local_rank), node_rank
def _compute_local_rank_via_hostname(self) -> int:
# get all hostnames
hostnames = self._get_all_hostnames()
local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)
if local_rank < 0 or self._node < 0:
raise ValueError(
"Failed to correctly estimate local rank. "
f"Debugging info: local rank: {local_rank}, node rank: {self._node}, hostnames: {hostnames}"
)
return local_rank
def _identify_local_rank(self) -> None:
if "SLURM_JOB_ID" in os.environ:
os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"]
if "LOCAL_RANK" in os.environ:
self._local_rank = int(os.environ["LOCAL_RANK"])
elif self._ext_local_rank is not None:
self._local_rank = self._ext_local_rank
else:
warnings.warn(
"Local rank information for native distributed setting will be initialized using "
"a heuristic approach based on the hostnames. In some corner cases, determined "
"local rank can be different from the real setup. To avoid this warning, "
"please either set `os.environ['LOCAL_RANK']` "
"or use `idist.set_local_rank(local_rank)` with correct local rank index."
)
# use socket gethostname heuristic to determine number of nodes => local rank
self._local_rank = self._compute_local_rank_via_hostname()
def setup_env_vars(self, rank: Optional[int] = None, world_size: Optional[int] = None) -> None:
self._env_backup = os.environ.copy()
if "SLURM_JOB_ID" in os.environ:
if rank is not None or world_size is not None:
raise ValueError("Arguments rank and world_size should not be specified with SLURM")
self._setup_env_in_slurm()
else:
env_vars = ["RANK", "LOCAL_RANK", "WORLD_SIZE"]
all_env_vars_defined = [k in os.environ for k in env_vars]
# check if all necessary env vars are set
# if partially defined raise an error
if any(all_env_vars_defined) and not all(all_env_vars_defined):
raise RuntimeError(f"PyTorch distributed configuration should define env variables '{env_vars}'")
os.environ["RANK"] = os.environ.get("RANK", f"{rank if rank is not None else 0}")
os.environ["WORLD_SIZE"] = os.environ.get(
"WORLD_SIZE", f"{world_size if world_size is not None else 1}"
)
os.environ["LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
os.environ["MASTER_PORT"] = os.environ.get("MASTER_PORT", "15000")
os.environ["MASTER_ADDR"] = os.environ.get("MASTER_ADDR", "127.0.0.1")
self._local_rank = int(os.environ["LOCAL_RANK"])
self._master_addr = os.environ["MASTER_ADDR"]
self._master_port = int(os.environ["MASTER_PORT"])
def _setup_env_in_slurm(self) -> None:
slurm_env_req_vars = [
"SLURM_JOB_ID",
"SLURM_PROCID",
"SLURM_LOCALID",
"SLURM_NTASKS",
"SLURM_JOB_NODELIST",
"SLURM_JOB_NUM_NODES",
]
for k in slurm_env_req_vars:
if k not in os.environ:
raise RuntimeError(f"SLURM distributed configuration is missing '{k}' in env variables")
ddp_vars = _setup_ddp_vars_from_slurm_env(cast(Dict, os.environ))
# define DDP env vars required by PTH:
for key, value in ddp_vars.items():
os.environ[key] = str(value)
def get_local_rank(self) -> int:
return cast(int, self._local_rank)
def get_rank(self) -> int:
return dist.get_rank()
def get_world_size(self) -> int:
return dist.get_world_size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
if torch.cuda.is_available():
index = torch.cuda.current_device()
if index < self.get_local_rank():
warnings.warn(
"Current device index is less than current local rank. "
"Please, make sure to call torch.cuda.set_device(local_rank)."
)
return torch.device(f"cuda:{index}")
return torch.device("cpu")
def backend(self) -> str:
return dist.get_backend()
def finalize(self) -> None:
dist.destroy_process_group()
# restore backed-up env
self._restore_env()
def _restore_env(self) -> None:
# restore backed-up env
if self._env_backup is not None:
os.environ.clear()
os.environ.update(self._env_backup)
@staticmethod
def _dist_worker_task_fn(
local_rank: int,
backend: str,
fn: Callable,
args: Tuple,
kw_dict: Mapping,
world_size: int,
nprocs_per_node: int,
node_rank: int,
master_addr: Optional[str],
master_port: Optional[str],
init_method: str,
kw: Any,
) -> None:
from ignite.distributed.utils import _set_model, finalize
copy_env_vars = os.environ.copy()
rank = node_rank * nprocs_per_node + local_rank
os.environ["LOCAL_RANK"] = str(local_rank)
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
arg_world_size: Optional[int] = world_size
arg_rank: Optional[int] = rank
if init_method == "env://":
os.environ["MASTER_ADDR"] = str(master_addr)
os.environ["MASTER_PORT"] = str(master_port)
arg_world_size = None
arg_rank = None
model = _NativeDistModel.create_from_backend(
backend, init_method=init_method, world_size=arg_world_size, rank=arg_rank, **kw
)
_set_model(model)
fn(local_rank, *args, **kw_dict)
finalize()
os.environ.clear()
os.environ.update(copy_env_vars)
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
nnodes: int = 1,
node_rank: int = 0,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
backend: str = "nccl",
init_method: Optional[str] = None,
**kwargs: Any,
) -> None:
world_size = nnodes * nproc_per_node
spawn_kwargs = {
"join": kwargs.get("join", True),
"daemon": kwargs.get("daemon", False),
}
start_processes = mp.spawn
# start_method and start_processes in pytorch >= 1.5
if Version(torch.__version__) >= Version("1.5.0"):
import builtins
if "__IPYTHON__" in builtins.__dict__:
# use fork in jupyter
default_start_method = "fork"
else:
default_start_method = "spawn"
spawn_kwargs["start_method"] = kwargs.get("start_method", default_start_method)
start_processes = mp.start_processes
# TODO: `spawn` wrongfully does not adopt address and port from environment if `init_method` is "env://"
if init_method in [None, "env://"]:
init_method = "env://"
if master_port is None:
master_port = 2222
if master_addr is None:
master_addr = "127.0.0.1"
elif master_addr is not None:
raise ValueError("master_addr should be None if init_method is provided other then 'env://'")
elif master_port is not None:
raise ValueError("master_port should be None if init_method is provided other then 'env://'")
start_processes(
_NativeDistModel._dist_worker_task_fn,
nprocs=nproc_per_node,
args=(
backend,
fn,
args,
kwargs_dict,
world_size,
nproc_per_node,
node_rank,
master_addr,
master_port,
init_method,
kwargs,
),
**spawn_kwargs,
)
_reduce_op_map = {
"SUM": dist.ReduceOp.SUM,
"PRODUCT": dist.ReduceOp.PRODUCT,
"MIN": dist.ReduceOp.MIN,
"MAX": dist.ReduceOp.MAX,
"AND": dist.ReduceOp.BAND,
"OR": dist.ReduceOp.BOR,
}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if op not in self._reduce_op_map:
raise ValueError(f"Unsupported reduction operation: '{op}'")
if group is not None and not isinstance(group, dist.ProcessGroup):
raise ValueError("Argument group should be list of int or ProcessGroup")
reduce_op = self._reduce_op_map[op]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_reduce(tensor, reduce_op, group=group)
else:
dist.all_reduce(tensor, reduce_op)
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
if group == dist.GroupMember.NON_GROUP_MEMBER:
return tensor
if group is None:
group_size = self.get_world_size()
elif isinstance(group, dist.ProcessGroup):
group_size = group.size()
else:
raise ValueError("Argument group should be list of int or ProcessGroup")
if tensor.ndimension() == 0:
tensor = tensor.unsqueeze(0)
output = [torch.zeros_like(tensor) for _ in range(group_size)]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_gather(output, tensor, group=group)
else:
dist.all_gather(output, tensor)
return torch.cat(output, dim=0)
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
if Version(torch.__version__) < Version("1.7.0"):
raise RuntimeError(
"Current torch version does not implement dist.all_gather_object. "
"Required version should be >=1.7.0"
)
if group == dist.GroupMember.NON_GROUP_MEMBER:
return tensor
if group is None:
group_size = self.get_world_size()
elif isinstance(group, dist.ProcessGroup):
group_size = group.size()
else:
raise ValueError("Argument group should be list of int or ProcessGroup")
output = [None for _ in range(group_size)]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_gather_object(output, tensor, group=group)
else:
dist.all_gather_object(output, tensor)
return output
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return dist.new_group(ranks=ranks, **kwargs)
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
dist.broadcast(tensor, src=src)
return tensor
def barrier(self) -> None:
dist.barrier()
def _expand_hostlist(nodelist: str) -> List[str]:
"""Expand a compressed hostlist string and returns all hosts listed.
Source : https://github.com/LLNL/py-hostlist/blob/master/hostlist/hostlist.py
Args:
nodelist: Compressed hostlist string
.. note::
The host names can be composed of any characters except the special ones `[`, `]`, `,`. Only one
sequence `[...]` is supported per hostname.
.. versionadded:: 0.4.6
"""
result_hostlist = []
nodelist_match = r"([^,\[\]]+\[[^\[\]]*\][^,\[\]]*|[^,\[\]]*),?"
nodelist = nodelist.replace(" ", "")
for node in re.findall(nodelist_match, nodelist):
node_match = r"(.+)\[((,?[0-9]+-?,?-?){0,})\](.*)?"
match = re.search(node_match, node)
if match is None:
if node:
result_hostlist.append(node)
else:
# holds the ranges of nodes as a string
# now we can manipulate the string and cast it to a list of numbers
num = str(match.group(2)).replace("[", "").replace("]", "")
if len(num) == 0:
raise ValueError(f"hostlist invalid : {nodelist}")
num_list = num.split(",")
# find range of node numbers
ranges = [elem.split("-") if "-" in elem else [elem, elem] for elem in num_list]
# if the node numbers contain leading zeros, store their count so the zeros can be re-added later
lead_zeros = max([len(s) - len(s.lstrip("0")) for s, _ in ranges])
# list of expanded ranges of node numbers
nodes_list = [list(range(int(s), int(e) + 1)) for s, e in ranges]
# flatten the list
final_list = [item for sublist in nodes_list for item in sublist]
# put final list in ascending order and append cluster name to each node number
final_list = list(sorted(set(final_list)))
# prepend leading zeros to the numbers where required
hostlist_tmp = [str(elem).zfill(lead_zeros + 1) for elem in final_list]
# prepend the hostname prefix to each node number
hostlist_no_suffix = [match.group(1) + elem for elem in hostlist_tmp]
# append suffix to hostlist if there is one
final_hostlist = [elem + match.group(4) for elem in hostlist_no_suffix]
result_hostlist += final_hostlist
return result_hostlist
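# Illustrative sketch (not part of the library API): what `_expand_hostlist`
# produces for a typical SLURM-style compressed node list.
def _example_expand_hostlist() -> None:
    expanded = _expand_hostlist("c1-[001-003,005],c2-01")
    print(expanded)  # ['c1-001', 'c1-002', 'c1-003', 'c1-005', 'c2-01']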
def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]:
"""Method to setup DDP env vars required by PyTorch from SLURM env"""
# 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc
# See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh
# 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM
# To cover case 1), let's ensure that defined RANK == SLURM_PROCID, LOCAL_RANK == SLURM_LOCALID,
# WORLD_SIZE == SLURM_NTASKS. We will use defined MASTER_ADDR and MASTER_PORT instead of defining
# them ourselves
# To cover case 2), let's check that defined RANK >= SLURM_PROCID, LOCAL_RANK >= SLURM_LOCALID,
# WORLD_SIZE >= SLURM_NTASKS, SLURM_JOB_NUM_NODES == 1
ddp_vars: Dict[str, Union[str, int, None]] = {
"RANK": int(environ["SLURM_PROCID"]),
"LOCAL_RANK": int(environ["SLURM_LOCALID"]),
"WORLD_SIZE": int(environ["SLURM_NTASKS"]),
"MASTER_ADDR": None,
"MASTER_PORT": None,
}
pth_ddp_env_vars = {key: environ.get(key, None) for key in ddp_vars}
defined_pth_ddp_env_vars = [v is not None for v in pth_ddp_env_vars.values()]
if all(defined_pth_ddp_env_vars):
nnodes = int(environ["SLURM_JOB_NUM_NODES"])
if nnodes > 1:
# ensure that all pth_ddp_env_vars are consistent with slurm vars
for key in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
slurm_var = cast(int, ddp_vars[key])
pth_var = int(cast(str, pth_ddp_env_vars[key]))
if slurm_var != pth_var:
raise RuntimeError(
"Environment variable defined for PyTorch Distributed context is inconsistent with "
f"equivalent SLURM env variable. {key}: {pth_var} vs {slurm_var}\n"
f"SLURM vars: {ddp_vars}\n"
f"PTH vars: {pth_ddp_env_vars}\n"
)
else:
# ensure that PTH RANK >= SLURM_PROCID, PTH LOCAL_RANK >= SLURM_LOCALID,
# PTH WORLD_SIZE >= SLURM_NTASKS
for key in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
slurm_var = cast(int, ddp_vars[key])
pth_var = int(cast(str, pth_ddp_env_vars[key]))
if pth_var < slurm_var:
raise RuntimeError(
"Environment variable defined for PyTorch Distributed context is "
"inconsistent with equivalent SLURM env variable. "
f"We expect that {key}: {pth_var} >= {slurm_var}\n"
f"SLURM vars: {ddp_vars}\n"
f"PTH vars: {pth_ddp_env_vars}\n"
)
ddp_vars[key] = pth_var
# set up MASTER_ADDR and MASTER_PORT from PTH
ddp_vars["MASTER_ADDR"] = cast(str, pth_ddp_env_vars["MASTER_ADDR"])
ddp_vars["MASTER_PORT"] = int(cast(str, pth_ddp_env_vars["MASTER_PORT"]))
elif any(defined_pth_ddp_env_vars):
# Let's warn the user about PTH env variables that we could not take into account
warnings.warn(
"We detected the following env variables: "
f"{[(k, v) for k, v in pth_ddp_env_vars.items() if v is not None]},\n"
"but will not take them into account as the following env vars are missing:"
f"{[k for k, v in pth_ddp_env_vars.items() if v is None]},\n"
)
if ddp_vars["MASTER_ADDR"] is None:
nodelist = environ["SLURM_JOB_NODELIST"]
try:
# use scontrol to expand hostname list
hostnames = subprocess.check_output(["scontrol", "show", "hostnames", nodelist])
method = "scontrol"
except FileNotFoundError:
# expand hostname list the way scontrol would
hostnames = " ".join(_expand_hostlist(nodelist)).encode("utf-8")
method = "ignite"
# at least one hostname should be defined
hostname_list = hostnames.split()
if len(hostname_list) < 1:
raise RuntimeError(f"No hostname detected in SLURM_JOB_NODELIST by {method} (nodelist={nodelist})")
# master address is the first hostname of nodes list
ddp_vars["MASTER_ADDR"] = str(hostname_list[0].decode("utf-8"))
if ddp_vars["MASTER_PORT"] is None:
# port should be the same across all processes
slurm_port = environ["SLURM_JOB_ID"]
slurm_port = slurm_port[-4:]
ddp_vars["MASTER_PORT"] = int(slurm_port) + 15000
return cast(Dict[str, Union[str, int]], ddp_vars)
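# Illustrative sketch (not part of the library API): the DDP variables derived
# from a minimal, fake SLURM environment. The job id and node list below are
# placeholders; MASTER_PORT is derived from the last four digits of the job id.
def _example_slurm_ddp_vars() -> Dict[str, Union[str, int]]:
    fake_env = {
        "SLURM_JOB_ID": "12345",
        "SLURM_PROCID": "3",
        "SLURM_LOCALID": "1",
        "SLURM_NTASKS": "4",
        "SLURM_JOB_NODELIST": "node[1-2]",
        "SLURM_JOB_NUM_NODES": "2",
    }
    # expected: RANK=3, LOCAL_RANK=1, WORLD_SIZE=4, MASTER_ADDR='node1', MASTER_PORT=17345
    return _setup_ddp_vars_from_slurm_env(fake_env)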
|
from typing import Any, Callable, cast, List, Mapping, Optional, Tuple
import torch
from ignite.distributed.comp_models.base import ComputationModel
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
has_xla_support = True
except ImportError:
has_xla_support = False
if has_xla_support:
XLA_TPU = "xla-tpu"
class _XlaDistModel(ComputationModel):
"""Private class for PyTorch XLA basic distributed computation model.
It handles single- and multi-device computation models.
Supported XLA devices:
- CPU
- TPU
"""
name = "xla-dist"
available_backends = (XLA_TPU,)
@staticmethod
def create_from_context() -> Optional["_XlaDistModel"]:
return _XlaDistModel()
@staticmethod
def create_from_backend(backend: str = XLA_TPU, **kwargs: Any) -> "_XlaDistModel":
if backend not in _XlaDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_XlaDistModel.available_backends}'")
return _XlaDistModel(backend=backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any):
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_XlaDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
else:
self._init_from_context()
def _create_from_backend(self, backend: str, **kwargs: Any) -> None:
xm.rendezvous("init")
self._backend: str = backend
self._setup_attrs()
def _init_from_context(self) -> None:
self._backend = XLA_TPU
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
tensor = torch.tensor([self.get_local_rank() + 1.0], dtype=torch.float).to(self.device())
xm.all_reduce("max", [tensor])
return int(tensor.item())
def get_local_rank(self) -> int:
return xm.get_local_ordinal()
def get_rank(self) -> int:
return xm.get_ordinal()
def get_world_size(self) -> int:
return xm.xrt_world_size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
dev = torch_xla._XLAC._xla_get_default_device()
return torch.device(dev)
def backend(self) -> str:
return self._backend
def finalize(self) -> None:
pass
@staticmethod
def _dist_worker_task_fn(
local_rank: int, backend: str, fn: Callable, args: Tuple, kwargs_dict: Mapping
) -> None:
from ignite.distributed.utils import _set_model, finalize
model = _XlaDistModel.create_from_backend(backend)
_set_model(model)
fn(local_rank, *args, **kwargs_dict)
finalize()
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
nnodes: int = 1,
node_rank: int = 0,
backend: str = XLA_TPU,
**kwargs: Any,
) -> None:
if "start_method" not in kwargs:
kwargs["start_method"] = "fork"
xmp.spawn(
_XlaDistModel._dist_worker_task_fn,
args=(backend, fn, args, kwargs_dict),
nprocs=nproc_per_node,
**kwargs,
)
_collective_op_dtype = torch.float32
_reduce_op_map = {
"SUM": "sum",
"PRODUCT": "mul",
"MIN": "min",
"MAX": "max",
"AND": "and",
"OR": "or",
}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if group is not None and not self._check_group_type(group):
raise ValueError("Argument group should be list of int")
op = self._reduce_op_map[op]
xm.all_reduce(op, [tensor], groups=group)
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
# from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb
if group is not None and (not isinstance(group, list) or not all(isinstance(item, int) for item in group)):
raise ValueError("Argument group should be list of int")
group_size = self.get_world_size()
output = torch.zeros((group_size,) + tensor.shape, dtype=tensor.dtype, device=tensor.device)
output[self.get_rank() % group_size] = tensor
xm.all_reduce("sum", [output], groups=group)
return output.reshape(-1, *output.shape[2:])
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
raise NotImplementedError("all_gather on object is not implemented for xla")
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return [ranks]
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
# from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb
if src != self.get_rank():
tensor.fill_(0.0)
xm.all_reduce("sum", [tensor])
return tensor
def barrier(self) -> None:
xm.rendezvous("barrier")
def _check_group_type(self, group: Optional[Any]) -> bool:
if isinstance(group, list) and all(isinstance(item, int) for item in group):
return True
return False
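# Illustrative sketch (not part of the library API): the gather-via-sum trick used
# by `_do_all_gather` above, simulated for three fake ranks with plain torch
# tensors instead of a real `xm.all_reduce` across XLA processes.
def _example_gather_via_sum() -> None:
    world_size = 3
    per_rank = [torch.full((2,), float(rank)) for rank in range(world_size)]
    buffers = []
    for rank, tensor in enumerate(per_rank):
        # each rank writes its tensor into its own slot of a zero buffer ...
        buf = torch.zeros((world_size,) + tensor.shape)
        buf[rank] = tensor
        buffers.append(buf)
    # ... and summing the buffers (what the cross-process sum-reduction does)
    # reconstructs the concatenation of all per-rank tensors
    gathered = torch.stack(buffers).sum(dim=0).reshape(-1)
    print(gathered)  # tensor([0., 0., 1., 1., 2., 2.])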
|
from abc import ABCMeta, abstractmethod
from numbers import Number
from typing import Any, Callable, cast, List, Optional, Union
import torch
class ComputationModel(metaclass=ABCMeta):
"""Base class for distributed computation models and defines interface methods.
This class is public and should be used for other custom derived distributed models.
"""
# additional local rank storage, used when idist is set up from an existing native torch dist context
_ext_local_rank: Optional[int] = None
def __init__(self) -> None:
self._backend: Optional[str] = None
self._nproc_per_node: Optional[int] = None
self._nnodes: Optional[int] = None
self._node: Optional[int] = None
def _setup_attrs(self) -> None:
if self._nproc_per_node is None:
self._nproc_per_node = self._compute_nproc_per_node() if self.get_world_size() > 1 else 1
if self._nnodes is None:
self._nnodes = self.get_world_size() // self._nproc_per_node
if self._node is None:
self._node = self.get_rank() // self._nproc_per_node
@abstractmethod
def _compute_nproc_per_node(self) -> int:
pass
@abstractmethod
def get_local_rank(self) -> int:
pass
@abstractmethod
def get_rank(self) -> int:
pass
@abstractmethod
def get_world_size(self) -> int:
pass
@abstractmethod
def get_nproc_per_node(self) -> int:
pass
@abstractmethod
def get_nnodes(self) -> int:
pass
@abstractmethod
def get_node_rank(self) -> int:
pass
@abstractmethod
def device(self) -> torch.device:
pass
@abstractmethod
def backend(self) -> Optional[str]:
pass
@abstractmethod
def finalize(self) -> None:
pass
@staticmethod
@abstractmethod
def create_from_context() -> Optional["ComputationModel"]:
pass
@staticmethod
@abstractmethod
def create_from_backend(backend: str, **kwargs: Any) -> "ComputationModel":
pass
@staticmethod
@abstractmethod
def spawn(*args: Any, **kwargs: Any) -> None:
pass
_collective_op_dtype: Any = None
@staticmethod
def _encode_str(x: str, device: torch.device, size: int) -> torch.Tensor:
name = torch.tensor(bytearray(x, "utf-8")).to(device)
padded_x = torch.zeros(size + 1, device=device, dtype=torch.long)
padded_x[: len(name)] = name
padded_x[-1] = len(name)
# output is tensor of shape (1, size + 1)
return padded_x.unsqueeze(0)
def _get_max_length(self, x: str, device: torch.device) -> int:
size = torch.tensor([len(x)], device=device)
size = self._do_all_reduce(size, op="MAX")
return cast(int, size.item())
@staticmethod
def _encode_input_data(x: Union[torch.Tensor, float, str, None], is_src: bool) -> List[int]:
encoded_msg = [-1] * 512
if not is_src:
# Discard input type if not source
return encoded_msg
if isinstance(x, torch.Tensor):
shape = x.shape
dtype = str(x.dtype)
msg = [0, len(shape), *shape, len(dtype), *list(bytearray(dtype, "utf-8"))]
encoded_msg[: len(msg)] = msg
elif isinstance(x, Number):
encoded_msg[0] = 1
elif isinstance(x, str):
encoded_msg[0] = 2
return encoded_msg
@staticmethod
def _decode_as_placeholder(encoded_msg: List[int], device: torch.device) -> Union[torch.Tensor, float, str]:
if encoded_msg[0] == 0:
len_shape = encoded_msg[1]
le = 2 + len_shape
shape = encoded_msg[2:le] if len_shape > 0 else []
len_dtype = encoded_msg[le]
dtype_str = bytearray(encoded_msg[le + 1 : le + 1 + len_dtype]).decode("utf-8")
dtype = eval(dtype_str)
return torch.empty(shape, device=device, dtype=dtype)
elif encoded_msg[0] == 1:
return 0.0
elif encoded_msg[0] == 2:
return ""
else:
raise RuntimeError(f"Internal error: unhandled dtype {encoded_msg[0]}, encoded_msg={encoded_msg}")
def _setup_placeholder(
self, x: Union[torch.Tensor, float, str, None], device: torch.device, is_src: bool
) -> Union[torch.Tensor, float, str]:
encoded_msg_per_rank = self._encode_input_data(x, is_src)
encoded_msg_all_ranks = self._do_all_reduce(torch.tensor(encoded_msg_per_rank, device=device), op="MAX")
if is_src:
if x is None:
raise RuntimeError("Internal error, x is None. Please, file an issue if you encounter this error.")
return x
encoded_msg = encoded_msg_all_ranks.cpu().tolist()
return self._decode_as_placeholder(encoded_msg, device)
@staticmethod
def _decode_str(xs: torch.Tensor) -> List[str]:
# xs.shape = (n, size + 1), e.g. (world_size, size + 1)
out = [bytearray(x[: x[-1]].tolist()).decode("utf-8") for x in xs]
return out
def _apply_op(
self, tensor: torch.Tensor, device: torch.device, fn: Callable, *args: Any, **kwargs: Any
) -> torch.Tensor:
out_dtype = None
tensor_device = None
# check if the tensor is at specified device
if tensor.device != device:
tensor_device = tensor.device
tensor = tensor.to(device)
if self._collective_op_dtype is not None:
# cast to _collective_op_dtype if current type is not floatX
if tensor.dtype not in (torch.float32, torch.float64):
out_dtype = tensor.dtype
tensor = tensor.to(self._collective_op_dtype)
tensor = fn(tensor, *args, **kwargs)
if out_dtype is not None and tensor_device is not None:
return tensor.to(dtype=out_dtype, device=tensor_device)
if out_dtype is not None:
return tensor.to(dtype=out_dtype)
if tensor_device is not None:
return tensor.to(device=tensor_device)
return tensor
def _collective_op(
self, tensor: Union[torch.Tensor, Number, str], fn: Callable, *args: Any, **kwargs: Any
) -> Union[torch.Tensor, float, List[float], List[str]]:
tensor_to_number = tensor_to_str = False
device = self.device()
if isinstance(tensor, (Number, float)):
tensor_to_number = True
dtype = self._collective_op_dtype
if dtype is None and isinstance(tensor, float):
dtype = torch.double
tensor = torch.tensor(tensor, device=device, dtype=dtype)
elif isinstance(tensor, str):
tensor_to_str = True
max_length = self._get_max_length(tensor, device)
tensor = self._encode_str(tensor, device, size=max_length)
tensor = self._apply_op(tensor, device, fn, *args, **kwargs)
if tensor_to_number:
if tensor.numel() == 1:
return tensor.item()
else:
return tensor.tolist()
elif tensor_to_str:
return self._decode_str(tensor)
return tensor
def all_reduce(
self, tensor: Union[torch.Tensor, float], op: str = "sum", group: Optional[Any] = None
) -> Union[torch.Tensor, float]:
if not isinstance(tensor, (torch.Tensor, Number)):
raise TypeError(f"Unhandled input type {type(tensor)}")
return cast(Union[torch.Tensor, float], self._collective_op(tensor, self._do_all_reduce, op, group=group))
def all_gather(
self, tensor: Union[torch.Tensor, float, str, Any], group: Optional[Any] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
if not isinstance(tensor, (torch.Tensor, Number, str)):
return self._do_all_gather_object(tensor, group=group)
return self._collective_op(tensor, self._do_all_gather, group=group)
def new_group(self, ranks: List[int], **kwargs: Any) -> Any:
if isinstance(ranks, list) and all(isinstance(item, int) for item in ranks):
return self._do_new_group(ranks, **kwargs)
else:
raise ValueError("Argument ranks should be list of int")
def broadcast(
self, tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
if not (isinstance(tensor, (torch.Tensor, Number, str)) or tensor is None):
raise TypeError(f"Unhandled input type {type(tensor)}")
rank = self.get_rank()
if tensor is None:
if rank == src:
raise ValueError("Source data can not be None")
elif not safe_mode:
raise ValueError("Argument safe_mode should be True if None is passed for non src rank")
device = self.device()
tensor_to_number = tensor_to_str = False
if safe_mode:
tensor = self._setup_placeholder(tensor, device, rank == src)
if tensor is None:
raise RuntimeError("Internal error, tensor is None. Please, file an issue if you encounter this error.")
if isinstance(tensor, (Number, float)): # have to use Number and float to satisfy mypy
tensor_to_number = True
if rank != src:
tensor = torch.empty(1, device=device, dtype=torch.float)
else:
tensor = torch.tensor([tensor], device=device, dtype=torch.float)
elif isinstance(tensor, str):
tensor_to_str = True
max_length = self._get_max_length(tensor, device)
if rank != src:
tensor = torch.empty(1, max_length + 1, device=device, dtype=torch.long)
else:
tensor = self._encode_str(tensor, device, size=max_length)
tensor = self._apply_op(tensor, device, self._do_broadcast, src)
if tensor_to_number:
return tensor.item()
if tensor_to_str:
list_str = self._decode_str(tensor)
return list_str[0]
return tensor
@abstractmethod
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
pass
@abstractmethod
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
pass
@abstractmethod
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
pass
@abstractmethod
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
pass
@abstractmethod
def barrier(self) -> None:
pass
@abstractmethod
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
pass
class _SerialModel(ComputationModel):
"""Private class defines non-distributed computation model for code compatibility with other distributed models."""
name = "serial"
available_backends = ()
def __init__(self, _backend: Optional[str] = None, **kwargs: Any) -> None:
super(_SerialModel, self).__init__()
def get_local_rank(self) -> int:
return 0
def get_rank(self) -> int:
return 0
def get_world_size(self) -> int:
return 1
def get_nproc_per_node(self) -> int:
return 1
def get_nnodes(self) -> int:
return 1
def get_node_rank(self) -> int:
return 0
def device(self) -> torch.device:
if torch.cuda.is_available():
return torch.device("cuda")
return torch.device("cpu")
def backend(self) -> Optional[str]:
return None
def finalize(self) -> None:
pass
def _compute_nproc_per_node(self) -> int:
return 1
@staticmethod
def create_from_context() -> "_SerialModel":
return _SerialModel()
@staticmethod
def create_from_backend(backend: Optional[str] = None, **kwargs: Any) -> "_SerialModel":
return _SerialModel()
@staticmethod
def spawn(*args: Any, **kwargs: Any) -> None:
raise NotImplementedError("Serial computation model does not implement spawn method")
def all_reduce(
self, tensor: Union[torch.Tensor, float], op: str = "SUM", group: Optional[Any] = None
) -> Union[torch.Tensor, float]:
return tensor
def all_gather(
self, tensor: Union[torch.Tensor, float, str], group: Optional[Any] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
if isinstance(tensor, torch.Tensor):
return tensor
return cast(Union[List[float], List[str]], [tensor])
def broadcast(
self, tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
if tensor is None:
raise ValueError("Argument tensor should not be None")
return tensor
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
return tensor
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> Any:
return tensor
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return ranks
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
return tensor
def barrier(self) -> None:
pass
def new_group(self, ranks: List[int], **kwargs: Any) -> Any:
if isinstance(ranks, list) and all(isinstance(item, int) for item in ranks):
return self._do_new_group(ranks, **kwargs)
else:
raise ValueError("Argument ranks should be list of int")
|
# -*- coding: utf-8 -*-
import warnings
from typing import Any, Dict, List, Tuple, Union
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.metrics import Metric
class GpuInfo(Metric):
"""Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
at each iteration.
.. Note ::
If GPU utilization reports "N/A" for a given GPU, the corresponding metric value is not set.
Examples:
.. code-block:: python
# Default GPU measurements
GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
# Progress bar will look like
# Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
# Logging with Tensorboard
tb_logger.attach(trainer,
log_handler=OutputHandler(tag="training", metric_names='all'),
event_name=Events.ITERATION_COMPLETED)
"""
def __init__(self) -> None:
try:
from pynvml.smi import nvidia_smi
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires pynvml to be installed. "
"Please install it with command: \n pip install pynvml"
)
# Let's check available devices
if not torch.cuda.is_available():
raise RuntimeError("This contrib module requires available GPU")
# Let it fail if no libnvidia drivers or NVML library is found
self.nvsmi = nvidia_smi.getInstance()
super(GpuInfo, self).__init__()
def reset(self) -> None:
pass
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
pass
def compute(self) -> List[Dict[str, Any]]:
data: Dict[str, List[Dict[str, Any]]] = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
if len(data) == 0 or ("gpu" not in data):
warnings.warn("No GPU information available")
return []
return data["gpu"]
def completed(self, engine: Engine, name: str) -> None:
data = self.compute()
if len(data) < 1:
warnings.warn("No GPU information available")
return
for i, data_by_rank in enumerate(data):
mem_name = f"{name}:{i} mem(%)"
if "fb_memory_usage" not in data_by_rank:
warnings.warn(f"No GPU memory usage information available in {data_by_rank}")
continue
mem_report = data_by_rank["fb_memory_usage"]
if not ("used" in mem_report and "total" in mem_report):
warnings.warn(
"GPU memory usage information does not provide used/total "
f"memory consumption information in {mem_report}"
)
continue
engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
for i, data_by_rank in enumerate(data):
util_name = f"{name}:{i} util(%)"
if "utilization" not in data_by_rank:
warnings.warn(f"No GPU utilization information available in {data_by_rank}")
continue
util_report = data_by_rank["utilization"]
if not ("gpu_util" in util_report):
warnings.warn(f"GPU utilization information does not provide 'gpu_util' information in {util_report}")
continue
try:
engine.state.metrics[util_name] = int(util_report["gpu_util"])
except ValueError:
# Do not set GPU utilization information
pass
# TODO: see issue https://github.com/pytorch/ignite/issues/1405
def attach( # type: ignore
self, engine: Engine, name: str = "gpu", event_name: Union[str, EventEnum] = Events.ITERATION_COMPLETED
) -> None:
engine.add_event_handler(event_name, self.completed, name)
|
from typing import Any, Callable, cast, Tuple, Union
import torch
from ignite import distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def roc_auc_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
from sklearn.metrics import roc_auc_score
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return roc_auc_score(y_true, y_pred)
def roc_auc_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
from sklearn.metrics import roc_curve
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return roc_curve(y_true, y_pred)
class ROC_AUC(EpochMetric):
"""Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC)
accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `sklearn.metrics.roc_auc_score
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#
sklearn.metrics.roc_auc_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = ROC_AUC(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
roc_auc = ROC_AUC()
#The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
y_true = torch.tensor([[0], [0], [1], [0]])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['roc_auc'])
.. testoutput::
0.6666...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import roc_auc_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(ROC_AUC, self).__init__(
roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device
)
class RocCurve(EpochMetric):
"""Compute Receiver operating characteristic (ROC) for binary classification task
by accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.roc_curve <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_curve.html#sklearn.metrics.roc_curve>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `sklearn.metrics.roc_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html#
sklearn.metrics.roc_curve>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
roc_curve = RocCurve(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
roc_auc = RocCurve()
# The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 0])
state = default_evaluator.run([[y_pred, y_true]])
print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])
.. testoutput::
FPR [0.0, 0.333, 0.333, 1.0]
TPR [0.0, 0.0, 1.0, 1.0]
Thresholds [inf, 1.0, 0.711, 0.047]
.. versionchanged:: 0.4.11
added `device` argument
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
try:
from sklearn.metrics import roc_curve # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(RocCurve, self).__init__(
roc_auc_curve_compute_fn, # type: ignore[arg-type]
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("RocCurve must have at least one example before it can be computed.")
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
fpr, tpr, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
fpr = torch.tensor(fpr, device=_prediction_tensor.device)
tpr = torch.tensor(tpr, device=_prediction_tensor.device)
thresholds = torch.tensor(thresholds, device=_prediction_tensor.device)
else:
fpr, tpr, thresholds = None, None, None
if ws > 1:
# broadcast result to all processes
fpr = idist.broadcast(fpr, src=0, safe_mode=True)
tpr = idist.broadcast(tpr, src=0, safe_mode=True)
thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
return fpr, tpr, thresholds
|
from typing import Any, Callable, cast, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
try:
from sklearn.metrics import precision_recall_curve
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return precision_recall_curve(y_true, y_pred)
class PrecisionRecallCurve(EpochMetric):
"""Compute precision-recall pairs for different probability thresholds for binary classification task
by accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `precision_recall_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
Note:
PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
or confidence values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])
prec_recall_curve = PrecisionRecallCurve()
prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
state = default_evaluator.run([[y_pred, y_true]])
print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
.. testoutput::
Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
Recall [1.0, 1.0, 1.0, 0.5, 0.0]
Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(PrecisionRecallCurve, self).__init__(
precision_recall_curve_compute_fn, # type: ignore[arg-type]
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
if self._result is None:
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
precision = torch.tensor(precision, device=_prediction_tensor.device)
recall = torch.tensor(recall, device=_prediction_tensor.device)
# thresholds can have negative strides, not compatible with torch tensors
# https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
else:
precision, recall, thresholds = None, None, None
if ws > 1:
# broadcast result to all processes
precision = idist.broadcast(precision, src=0, safe_mode=True)
recall = idist.broadcast(recall, src=0, safe_mode=True)
thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
self._result = (precision, recall, thresholds) # type: ignore[assignment]
return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
|
import ignite.contrib.metrics.regression
from ignite.contrib.metrics.average_precision import AveragePrecision
from ignite.contrib.metrics.cohen_kappa import CohenKappa
from ignite.contrib.metrics.gpu_info import GpuInfo
from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.contrib.metrics.roc_auc import ROC_AUC, RocCurve
|
from typing import Callable, Union
import torch
from ignite.metrics import EpochMetric
def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
from sklearn.metrics import average_precision_score
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return average_precision_score(y_true, y_pred)
class AveragePrecision(EpochMetric):
"""Computes Average Precision accumulating predictions and the ground-truth during an epoch
and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `average_precision_score
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
#sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
confidence values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def activated_output_transform(output):
y_pred, y = output
y_pred = torch.softmax(y_pred, dim=1)
return y_pred, y
avg_precision = AveragePrecision(activated_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])
avg_precision = AveragePrecision()
avg_precision.attach(default_evaluator, 'average_precision')
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['average_precision'])
.. testoutput::
0.9166...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import average_precision_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(AveragePrecision, self).__init__(
average_precision_compute_fn,
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
|
from typing import Callable, Optional, Union
import torch
from ignite.metrics import EpochMetric
class CohenKappa(EpochMetric):
"""Compute different types of Cohen's Kappa: Non-Wieghted, Linear, Quadratic.
Accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.cohen_kappa_score <https://scikit-learn.org/stable/modules/
generated/sklearn.metrics.cohen_kappa_score.html>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
weights: a string defining the type of Cohen's Kappa: non-weighted (``None``), ``"linear"``
or ``"quadratic"``. Default, None.
check_compute_fn: Default False. If True, `cohen_kappa_score
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html>`_
is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = CohenKappa()
metric.attach(default_evaluator, 'ck')
y_true = torch.tensor([2, 0, 2, 2, 0, 1])
y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['ck'])
.. testoutput::
0.4285...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
weights: Optional[str] = None,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import cohen_kappa_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
if weights not in (None, "linear", "quadratic"):
raise ValueError("Kappa Weighting type must be None or linear or quadratic.")
# initialize weights
self.weights = weights
self.cohen_kappa_compute = self.get_cohen_kappa_fn()
super(CohenKappa, self).__init__(
self.cohen_kappa_compute,
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def get_cohen_kappa_fn(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
"""Return a function computing Cohen Kappa from scikit-learn."""
from sklearn.metrics import cohen_kappa_score
def wrapper(y_targets: torch.Tensor, y_preds: torch.Tensor) -> float:
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return cohen_kappa_score(y_true, y_pred, weights=self.weights)
return wrapper
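# --- Usage sketch (added for illustration; not part of the original module) ---
# The ``weights`` argument is forwarded to ``sklearn.metrics.cohen_kappa_score``,
# selecting the non-weighted, linear, or quadratic variant. A minimal sketch,
# assuming scikit-learn is installed; the labels below are arbitrary examples.
if __name__ == "__main__":
    y_true = torch.tensor([2, 0, 2, 2, 0, 1])
    y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
    for w in (None, "linear", "quadratic"):
        ck = CohenKappa(weights=w)
        # Call the wrapped compute function directly, outside of an Engine run.
        print(w, ck.cohen_kappa_compute(y_pred, y_true))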
|
from abc import abstractmethod
from typing import Tuple
import torch
from ignite.metrics import Metric
from ignite.metrics.metric import reinit__is_reduced
def _check_output_shapes(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1
if not (y_pred.ndimension() == 1 or c1):
raise ValueError(f"Input y_pred should have shape (N,) or (N, 1), but given {y_pred.shape}")
c2 = y.ndimension() == 2 and y.shape[1] == 1
if not (y.ndimension() == 1 or c2):
raise ValueError(f"Input y should have shape (N,) or (N, 1), but given {y.shape}")
if y_pred.shape != y.shape:
raise ValueError(f"Input data shapes should be the same, but given {y_pred.shape} and {y.shape}")
def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if y_pred.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError(f"Input y_pred dtype should be float 16, 32 or 64, but given {y_pred.dtype}")
if y.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError(f"Input y dtype should be float 16, 32 or 64, but given {y.dtype}")
def _torch_median(output: torch.Tensor) -> float:
output = output.view(-1)
len_ = len(output)
if len_ % 2 == 0:
return float((torch.kthvalue(output, len_ // 2)[0] + torch.kthvalue(output, len_ // 2 + 1)[0]) / 2)
else:
return float(torch.kthvalue(output, len_ // 2 + 1)[0])
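# Added note (illustrative, not in the original module): _torch_median follows the
# usual median convention, averaging the two middle values for even-length input, e.g.
#
#     >>> _torch_median(torch.tensor([1.0, 3.0, 2.0]))
#     2.0
#     >>> _torch_median(torch.tensor([1.0, 2.0, 3.0, 4.0]))
#     2.5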
class _BaseRegression(Metric):
# Base class for all regression metrics
# `update` method check the shapes and call internal overloaded
# method `_update`.
@reinit__is_reduced
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
_check_output_shapes(output)
_check_output_types(output)
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
y_pred = y_pred.squeeze(dim=-1)
if y.ndimension() == 2 and y.shape[1] == 1:
y = y.squeeze(dim=-1)
self._update((y_pred, y))
@abstractmethod
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
pass
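# --- Illustrative sketch (added; not part of the original module) ---
# A custom regression metric only needs reset/_update/compute: _BaseRegression.update
# already validates shapes and dtypes and flattens (N, 1) inputs to (N,) before
# delegating to _update. The class below is a hypothetical example, not an ignite metric.
if __name__ == "__main__":
    class _SketchMeanAbsoluteError(_BaseRegression):
        def reset(self) -> None:
            self._sum = torch.tensor(0.0)
            self._n = 0

        def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
            y_pred, y = output
            self._sum += torch.abs(y_pred - y).sum()
            self._n += y.shape[0]

        def compute(self) -> float:
            return self._sum.item() / self._n

    m = _SketchMeanAbsoluteError()
    m.update((torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.5, 2.0, 2.0])))
    print(m.compute())  # 0.5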
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanAbsoluteRelativeError(_BaseRegression):
r"""Calculate Mean Absolute Relative Error.
.. math::
\text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanAbsoluteRelativeError()
metric.attach(default_evaluator, 'mare')
y_true = torch.tensor([1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mare'])
.. testoutput::
0.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device)
self._num_samples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if (y == 0).any():
raise NotComputableError("The ground truth has 0.")
absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))
self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)
self._num_samples += y.size()[0]
@sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples")
def compute(self) -> float:
if self._num_samples == 0:
raise NotComputableError(
"MeanAbsoluteRelativeError must have at least one sample before it can be computed."
)
return self._sum_of_absolute_relative_errors.item() / self._num_samples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class FractionalBias(_BaseRegression):
r"""Calculates the Fractional Bias.
.. math::
\text{FB} = \frac{1}{n}\sum_{j=1}^n\frac{2 (A_j - P_j)}{A_j + P_j}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FractionalBias()
metric.attach(default_evaluator, 'fractional_bias')
y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_bias'])
.. testoutput::
0.4000...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = 2 * (y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred) + 1e-30)
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("FractionalBias must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_absolute_percentage_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred))
return 100.0 * _torch_median(e)
class MedianAbsolutePercentageError(EpochMetric):
r"""Calculates the Median Absolute Percentage Error.
.. math::
\text{MdAPE} = 100 \cdot \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j|} \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
The current implementation stores all input data (output and target) as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianAbsolutePercentageError()
metric.attach(default_evaluator, 'mape')
y_true = torch.tensor([1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mape'])
.. testoutput::
25.0...
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianAbsolutePercentageError, self).__init__(
median_absolute_percentage_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import cast, List, Tuple
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced
class GeometricMeanRelativeAbsoluteError(_BaseRegression):
r"""Calculates the Geometric Mean Relative Absolute Error.
.. math::
\text{GMRAE} = \exp(\frac{1}{n}\sum_{j=1}^n \ln\frac{|A_j - P_j|}{|A_j - \bar{A}|})
where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value
and :math:`\bar{A}` is the mean of the ground truth.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
.. warning::
Current implementation of GMRAE stores all input data (output and target)
as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
In distributed configuration, all stored data (output and target) is collected across all processes
using an all-gather collective operation. This can potentially lead to a memory error.
The compute method computes the metric on the zero-rank process only and the final result is
broadcast to all processes.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricMeanRelativeAbsoluteError()
metric.attach(default_evaluator, 'gmare')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmare'])
.. testoutput::
0.0...
"""
@reinit__is_reduced
def reset(self) -> None:
self._predictions: List[torch.Tensor] = []
self._targets: List[torch.Tensor] = []
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
y_pred = y_pred.clone().to(self._device)
y = y.clone().to(self._device)
self._predictions.append(y_pred)
self._targets.append(y)
def compute(self) -> float:
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError(
"GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed."
)
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
result = torch.exp(
torch.log(
torch.abs(_target_tensor - _prediction_tensor) / torch.abs(_target_tensor - _target_tensor.mean())
).mean()
).item()
return result
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MaximumAbsoluteError(_BaseRegression):
r"""Calculates the Maximum Absolute Error.
.. math::
\text{MaxAE} = \max_{j=1,n} \left( \lvert A_j-P_j \rvert \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MaximumAbsoluteError()
metric.attach(default_evaluator, 'mae')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
.. testoutput::
1.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._max_of_absolute_errors: float = -1
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
mae = torch.abs(y_pred - y.view_as(y_pred)).max().item()
if self._max_of_absolute_errors < mae:
self._max_of_absolute_errors = mae
@sync_all_reduce("_max_of_absolute_errors:MAX")
def compute(self) -> float:
if self._max_of_absolute_errors < 0:
raise NotComputableError("MaximumAbsoluteError must have at least one example before it can be computed.")
return self._max_of_absolute_errors
|
from ignite.contrib.metrics.regression.canberra_metric import CanberraMetric
from ignite.contrib.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
from ignite.contrib.metrics.regression.fractional_bias import FractionalBias
from ignite.contrib.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
from ignite.contrib.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
from ignite.contrib.metrics.regression.manhattan_distance import ManhattanDistance
from ignite.contrib.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
from ignite.contrib.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
from ignite.contrib.metrics.regression.mean_error import MeanError
from ignite.contrib.metrics.regression.mean_normalized_bias import MeanNormalizedBias
from ignite.contrib.metrics.regression.median_absolute_error import MedianAbsoluteError
from ignite.contrib.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
from ignite.contrib.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
from ignite.contrib.metrics.regression.r2_score import R2Score
from ignite.contrib.metrics.regression.wave_hedges_distance import WaveHedgesDistance
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanError(_BaseRegression):
r"""Calculates the Mean Error.
.. math::
\text{ME} = \frac{1}{n}\sum_{j=1}^n (A_j - P_j)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanError()
metric.attach(default_evaluator, 'me')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['me'])
.. testoutput::
0.625...
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = y.view_as(y_pred) - y_pred
self._sum_of_errors += torch.sum(errors).item()
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("MeanError must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class GeometricMeanAbsoluteError(_BaseRegression):
r"""Calculates the Geometric Mean Absolute Error.
.. math::
\text{GMAE} = \exp(\frac{1}{n}\sum_{j=1}^n\ln(|A_j - P_j|))
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricMeanAbsoluteError()
metric.attach(default_evaluator, 'gmae')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmae'])
.. testoutput::
2.2723...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError(
"GeometricMeanAbsoluteError must have at least one example before it can be computed."
)
return torch.exp((self._sum_of_errors) / self._num_examples).item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class R2Score(_BaseRegression):
r"""Calculates the R-Squared, the
`coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_.
.. math::
R^2 = 1 - \frac{\sum_{j=1}^n(A_j - P_j)^2}{\sum_{j=1}^n(A_j - \bar{A})^2}
where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and
:math:`\bar{A}` is the mean of the ground truth.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = R2Score()
metric.attach(default_evaluator, 'r2')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['r2'])
.. testoutput::
0.8035...
.. versionchanged:: 0.4.3
Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._num_examples = 0
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._y_sq_sum = torch.tensor(0.0, device=self._device)
self._y_sum = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
self._num_examples += y.shape[0]
self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
self._y_sum += torch.sum(y).to(self._device)
self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
@sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("R2Score must have at least one example before it can be computed.")
return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)
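# --- Illustrative check (added; not part of the original module) ---
# The running sums above use SST = sum(y^2) - (sum(y))^2 / n, so compute() returns the
# definitional R^2 = 1 - SSE / SST. A minimal sketch reproducing the docstring example:
if __name__ == "__main__":
    y = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    y_pred = y * 0.75
    sse = torch.sum((y_pred - y) ** 2)
    sst = torch.sum((y - y.mean()) ** 2)
    print(float(1 - sse / sst))  # ~0.8035..., matching the docstring example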
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred)
return _torch_median(e)
class MedianAbsoluteError(EpochMetric):
r"""Calculates the Median Absolute Error.
.. math::
\text{MdAE} = \text{MD}_{j=1,n} \left( |A_j - P_j| \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
The current implementation stores all input data (output and target) as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianAbsoluteError()
metric.attach(default_evaluator, 'mae')
y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
.. testoutput::
0.625
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianAbsoluteError, self).__init__(
median_absolute_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_relative_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred) - torch.mean(y))
return _torch_median(e)
class MedianRelativeAbsoluteError(EpochMetric):
r"""Calculates the Median Relative Absolute Error.
.. math::
\text{MdRAE} = \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j - \bar{A}|} \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
The current implementation stores all input data (output and target) as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianRelativeAbsoluteError()
metric.attach(default_evaluator, 'mrae')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mrae'])
.. testoutput::
0.5...
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianRelativeAbsoluteError, self).__init__(
median_relative_absolute_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class CanberraMetric(_BaseRegression):
r"""Calculates the Canberra Metric.
.. math::
\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
.. _scikit-learn distance metrics:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
.. _`Botchkarev 2018`:
https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = CanberraMetric()
metric.attach(default_evaluator, 'canberra')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['canberra'])
.. testoutput::
0.8000...
.. versionchanged:: 0.4.3
- Fixed implementation: ``abs`` in denominator.
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + 1e-15)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class WaveHedgesDistance(_BaseRegression):
r"""Calculates the Wave Hedges Distance.
.. math::
\text{WHD} = \sum_{j=1}^n\frac{|A_j - P_j|}{max(A_j, P_j)}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = WaveHedgesDistance()
metric.attach(default_evaluator, 'whd')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['whd'])
.. testoutput::
1.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class FractionalAbsoluteError(_BaseRegression):
r"""Calculates the Fractional Absolute Error.
.. math::
\text{FAE} = \frac{1}{n}\sum_{j=1}^n\frac{2 |A_j - P_j|}{|A_j| + |P_j|}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FractionalAbsoluteError()
metric.attach(default_evaluator, 'fractional_abs_error')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 0.8
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_abs_error'])
.. testoutput::
0.2222...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_num_examples", "_sum_of_errors")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError(
"FractionalAbsoluteError must have at least one example before it can be computed."
)
return self._sum_of_errors.item() / self._num_examples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class ManhattanDistance(_BaseRegression):
r"""Calculates the Manhattan Distance.
.. math::
\text{MD} = \sum_{j=1}^n |A_j - P_j|
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `scikit-learn distance metrics`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = ManhattanDistance()
metric.attach(default_evaluator, 'manhattan')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['manhattan'])
.. testoutput::
3.75...
.. versionchanged:: 0.4.3
- Fixed sklearn compatibility.
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
errors = torch.abs(y - y_pred)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanNormalizedBias(_BaseRegression):
r"""Calculates the Mean Normalized Bias.
.. math::
\text{MNB} = \frac{1}{n}\sum_{j=1}^n\frac{A_j - P_j}{A_j}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanNormalizedBias()
metric.attach(default_evaluator, 'mnb')
y_true = torch.tensor([1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mnb'])
.. testoutput::
0.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if (y == 0).any():
raise NotComputableError("The ground truth has 0.")
errors = (y.view_as(y_pred) - y_pred) / y
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("MeanNormalizedBias must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|