python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import convert_to_one_hot
class TestUtils(unittest.TestCase):
def test_single(self):
targets = torch.tensor([[4]])
one_hot_target = convert_to_one_hot(targets, 5)
self.assertTrue(torch.allclose(one_hot_target, torch.tensor([[0, 0, 0, 0, 1]])))
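# For reference, roughly the same conversion can be done with core PyTorch
# (a sketch; not necessarily how convert_to_one_hot is implemented):
#   torch.nn.functional.one_hot(targets.squeeze(1), num_classes=5)
# which also yields [[0, 0, 0, 0, 1]] for targets = [[4]].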
def test_two(self):
targets = torch.tensor([[0], [1]])
one_hot_target = convert_to_one_hot(targets, 3)
self.assertTrue(
torch.allclose(one_hot_target, torch.tensor([[1, 0, 0], [0, 1, 0]]))
)
| ClassyVision-main | test/losses_generic_utils_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
from classy_vision.models import ClassyBlock
class TestClassyStatelessBlock(unittest.TestCase):
def setUp(self):
"""
This test checks on output stateful (default) and stateless variants of ClassyBlock
by enabling and propagating the environmental variable CLASSY_BLOCK_STATELESS
"""
# initialize stateful model
self._model_stateful = ClassyBlock(name="stateful", module=torch.nn.Identity())
# initialize stateless model
os.environ["CLASSY_BLOCK_STATELESS"] = "1"
self._model_stateless = ClassyBlock(
name="stateless", module=torch.nn.Identity()
)
# note: use low=1 since default of ClassyBlock output variable is torch.zeros
self._data = torch.randint(low=1, high=5, size=(3, 5, 5))
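# The two tests below rely on the ClassyBlock behavior sketched here (an assumption
# inferred from the assertions, not a verbatim copy of the implementation):
#   def forward(self, x):
#       out = self.module(x)
#       if not stateless:       # stateless mode leaves self.output at torch.zeros(0)
#           self.output = out   # stateful mode caches the activation on the block
#       return out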
def tearDown(self):
# environment variables do not propagate outside the scope of this test,
# but clean it up anyway
del os.environ["CLASSY_BLOCK_STATELESS"]
def test_classy_output_stateless(self):
# confirm model.output is (stateless) i.e. default of torch.zeros(0) and
# that output == data
output = self._model_stateless.forward(self._data)
self.assertTrue(torch.equal(self._model_stateless.output, torch.zeros(0)))
self.assertTrue(torch.equal(output, self._data))
def test_classy_output_stateful(self):
# confirm model.output keeps input data and that output == data
output = self._model_stateful.forward(self._data)
self.assertTrue(torch.equal(self._model_stateful.output, output))
self.assertTrue(torch.equal(output, self._data))
| ClassyVision-main | test/models_classy_block_stateless_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.models import build_model, EfficientNet
class TestEfficientNetModel(unittest.TestCase):
def get_model_config(self, use_model_name=False):
model_config = {
"name": "efficientnet",
"model_params": {
"width_coefficient": 1.1,
"depth_coefficient": 1.2,
"resolution": 260,
"dropout_rate": 0.3,
},
"bn_momentum": 0.01,
"bn_epsilon": 1e-3,
"drop_connect_rate": 0.2,
"num_classes": 1000,
"width_divisor": 8,
"min_width": None,
"use_se": True,
}
if use_model_name:
del model_config["model_params"]
model_config["model_name"] = "B2"
return model_config
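# For context (a hedged sketch of the usual EfficientNet compound-scaling rule, which
# this config is assumed to follow): width_coefficient scales the channel counts,
# rounded to a multiple of width_divisor (and never below min_width), while
# depth_coefficient scales the number of blocks per stage, rounded up. The "B2" preset
# used below corresponds to roughly these coefficients (1.1 / 1.2 at 260px input).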
def test_build_model(self):
"""
Test that the model builds from a config that uses either model_params or
model_name.
"""
for use_model_name in [True, False]:
model = build_model(self.get_model_config(use_model_name=use_model_name))
assert isinstance(model, EfficientNet)
def test_build_preset_model(self):
configs = [{"name": f"efficientnet_b{i}"} for i in range(8)]
for config in configs:
model = build_model(config)
self.assertIsInstance(model, EfficientNet)
def test_model_forward(self):
image_shape = (3, 260, 260)
num_images = (10,)
input = torch.randn(num_images + image_shape)
model = build_model(self.get_model_config())
model(input)
| ClassyVision-main | test/models_efficientnet_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
import torch.nn as nn
from classy_vision.hooks import ClassyHook
from classy_vision.hooks.precise_batch_norm_hook import PreciseBatchNormHook
from classy_vision.tasks import build_task
from classy_vision.trainer import ClassyTrainer
from test.generic.config_utils import get_test_mlp_task_config
from test.generic.hook_test_utils import HookTestBase
class TestPreciseBatchNormHook(HookTestBase):
def _get_bn_stats(self, model):
model = copy.deepcopy(model)
stats = {}
for name, module in model.named_modules():
if isinstance(module, nn.modules.batchnorm._BatchNorm):
stats[name] = {"mean": module.running_mean, "var": module.running_var}
return stats
def _compare_bn_stats(self, stats_1, stats_2):
# make sure the stats are non empty
self.assertGreater(len(stats_1), 0)
for name in stats_1:
if not torch.allclose(
stats_1[name]["mean"], stats_2[name]["mean"]
) or not torch.allclose(stats_1[name]["var"], stats_2[name]["var"]):
return False
return True
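# Background (hedged): "precise BN" re-estimates the BatchNorm running_mean / running_var
# buffers by forwarding a fixed number of samples (num_samples) through the model at the
# end of the train phase. That is why test_bn_stats below expects the buffers to change
# after train steps but to stay untouched after test steps.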
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
self.constructor_test_helper(
config={"num_samples": 10},
hook_type=PreciseBatchNormHook,
hook_registry_name="precise_bn",
invalid_configs=[{}, {"num_samples": 0}],
)
def test_train(self):
config = get_test_mlp_task_config()
task = build_task(config)
num_samples = 10
for cache_sample in [True, False]:
precise_batch_norm_hook = PreciseBatchNormHook(num_samples, cache_sample)
task.set_hooks([precise_batch_norm_hook])
task.prepare()
trainer = ClassyTrainer()
trainer.train(task)
def test_bn_stats(self):
base_self = self
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self):
self.train_bn_stats = None
self.test_bn_stats = None
def on_step(self, task):
if task.train:
self.train_bn_stats = base_self._get_bn_stats(task.base_model)
else:
self.test_bn_stats = base_self._get_bn_stats(task.base_model)
config = get_test_mlp_task_config()
task = build_task(config)
num_samples = 10
precise_batch_norm_hook = PreciseBatchNormHook(num_samples)
test_hook = TestHook()
task.set_hooks([precise_batch_norm_hook, test_hook])
trainer = ClassyTrainer()
trainer.train(task)
updated_bn_stats = self._get_bn_stats(task.base_model)
# the stats should be modified after train steps but not after test steps
self.assertFalse(
self._compare_bn_stats(test_hook.train_bn_stats, updated_bn_stats)
)
self.assertTrue(
self._compare_bn_stats(test_hook.test_bn_stats, updated_bn_stats)
)
| ClassyVision-main | test/hooks_precise_batch_norm_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import RecallAtKMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestRecallAtKMeter(ClassificationMeterTest):
def test_recall_meter_registry(self):
meter = meters.build_meter({"name": "recall_at_k", "topk": [1, 3]})
self.assertTrue(isinstance(meter, RecallAtKMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0?1
]
)
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 5.0, "top_2": 4 / 5.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_double_meter_update_and_reset(self):
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
# One-hot encoding, 1 = positive for class
# batch-1: sample-1: 1, sample-2: 0, sample-3: 0,1,2
# batch-2: sample-1: 1, sample-2: 1, sample-3: 1
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# First batch has top-1 recall of 2/5.0, top-2 recall of 4/5.0
# Second batch has top-1 recall of 2/3.0, top-2 recall of 2/3.0
expected_value = {"top_1": 4 / 8.0, "top_2": 6 / 8.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_invalid_model_output(self):
meter = RecallAtKMeter(topk=[1, 2])
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[0.33, 0.33, 0.34], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = RecallAtKMeter(topk=[1, 2])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
# Target shape is of length 3
target = torch.tensor([[[0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_topk(self):
meter = RecallAtKMeter(topk=[1, 5])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
#
# Expected value is the expected value of meter1. For this test
# to work, the top-1 / top-2 values of meter0 and meter1 should be
# different.
meters = [RecallAtKMeter(topk=[1, 2]), RecallAtKMeter(topk=[1, 2])]
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 0]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# Second update's expected value
expected_value = {"top_1": 2 / 3.0, "top_2": 2 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [RecallAtKMeter(topk=[1, 2]), RecallAtKMeter(topk=[1, 2])]
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
]
# Targets are one-hot: for meter 0 the positive class is 1 for sample 1, 0 for
# sample 2, and all classes for sample 3; for meter 1 class 1 is positive everywhere
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
]
# In first two updates there are 4 correct top-1 out of 8
# total, 6 correct in top 2 out of 8. The same occurs in the
# second two updates and is added to first
expected_values = [
{"top_1": 4 / 8.0, "top_2": 6 / 8.0}, # After one update to each meter
{"top_1": 8 / 16.0, "top_2": 12 / 16.0}, # After two updates to each meter
]
self.meter_distributed_test(meters, model_outputs, targets, expected_values)
def test_non_onehot_target(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Two batches of size 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
# Class-index targets; the equivalent one-hot encodings are shown in the comments
targets = [
torch.tensor([[1], [1], [1]]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([[0], [1], [2]]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 recall of 2/3.0, top-2 recall of 2/6.0
# Second batch has top-1 recall of 1/3.0, top-2 recall of 4/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 6 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_non_onehot_target_one_dim_target(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update with one dimensional targets.
"""
meter = RecallAtKMeter(topk=[1, 2], target_is_one_hot=False, num_classes=3)
# Two batches of size 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
# Class-index targets (one dimensional); the equivalent one-hot encodings are shown in the comments
targets = [
torch.tensor([1, 1, 1]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([0, 1, 2]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 recall of 2/3.0, top-2 recall of 2/6.0
# Second batch has top-1 recall of 1/3.0, top-2 recall of 4/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 6 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_fp16(self):
"""
This test verifies that the meter works if the input tensor is fp16.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0?1
]
).half()
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]).half()
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 5.0, "top_2": 4 / 5.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
| ClassyVision-main | test/meters_recall_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import unittest.mock as mock
from classy_vision.hooks import ProfilerHook
from test.generic.config_utils import get_test_classy_task, get_test_classy_video_task
from test.generic.hook_test_utils import HookTestBase
class TestProfilerHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {}
self.constructor_test_helper(
config=config, hook_type=ProfilerHook, hook_registry_name="profiler"
)
@mock.patch("torch.autograd.profiler.profile", auto_spec=True)
@mock.patch("classy_vision.hooks.profiler_hook.summarize_profiler_info")
def test_profiler(
self,
mock_summarize_profiler_info: mock.MagicMock,
mock_profile_cls: mock.MagicMock,
) -> None:
"""
Tests that a profile instance is returned by the profiler
and that the profiler actually ran.
"""
mock_summarize_profiler_info.return_value = ""
mock_profile = mock.MagicMock()
mock_profile_returned = mock.MagicMock()
mock_profile.__enter__.return_value = mock_profile_returned
mock_profile_cls.return_value = mock_profile
for task in [get_test_classy_task(), get_test_classy_video_task()]:
task.prepare()
# create a profiler hook
profiler_hook = ProfilerHook()
with self.assertLogs():
profiler_hook.on_start(task)
# a new profile should be created with use_cuda=True
mock_profile_cls.assert_called_once_with(use_cuda=True)
mock_profile_cls.reset_mock()
# summarize_profiler_info should have been called once with the profile
mock_summarize_profiler_info.assert_called_once()
profile = mock_summarize_profiler_info.call_args[0][0]
mock_summarize_profiler_info.reset_mock()
self.assertEqual(profile, mock_profile_returned)
| ClassyVision-main | test/hooks_profiler_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
from itertools import product
from typing import Any, Dict, List
import torch
from classy_vision.generic.distributed_util import _PRIMARY_RANK, broadcast_object
from torch.multiprocessing import Event, Process, Queue
def init_and_run_process(
rank, world_size, filename, fn, input, q, wait_event, backend="gloo"
):
torch.distributed.init_process_group(
backend, init_method=f"file://{filename}", rank=rank, world_size=world_size
)
r = fn(*input)
q.put(r)
wait_event.wait()
return
def run_in_process_group(filename: str, calls: List[Dict[str, Any]]):
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
processes = []
q = Queue()
wait_event = Event()
# launch one process per entry in calls
for rank, call in enumerate(calls):
p = Process(
target=init_and_run_process,
args=(
rank,
call["world_size"],
filename,
call["function"],
call["inputs"],
q,
wait_event,
),
)
p.start()
processes.append(p)
# fetch the results from the queue before joining, the background processes
# need to be alive if the queue contains tensors. See
# https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847/3 # noqa: B950
results = []
for _ in range(len(processes)):
results.append(q.get())
wait_event.set()
for p in processes:
p.join()
return results
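# What the tests below exercise (hedged): broadcast_object is expected to take an
# arbitrary picklable object that only the source rank holds (the other ranks pass
# None), broadcast it to every rank in the process group, optionally staging it on
# disk (the use_disk flag), and return an identical copy on each replica.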
class TestDistributedUtil(unittest.TestCase):
@staticmethod
def _get_test_objects():
return [
{"a": 12, "b": [2, 3, 4], "tensor": torch.randn(10, 10)},
None,
{"tensor": torch.randn(10000, 10000)}, # 400 MB
]
def test_broadcast_object(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
inputs[0] = obj # only the primary worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, _PRIMARY_RANK, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
def test_broadcast_object_pick_source(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
source_rank = 1
inputs[source_rank] = obj # only the rank 1 worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, source_rank, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
| ClassyVision-main | test/generic_distributed_util_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import BarronLoss, build_loss
class TestBarronLoss(unittest.TestCase):
def _get_config(self):
return {"name": "barron", "size_average": True, "alpha": 1.0, "c": 1.0}
def _get_outputs(self):
return torch.tensor([[2.0]])
def _get_targets(self):
return torch.tensor([3.0])
def test_build_barron(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, BarronLoss))
self.assertEqual(crit.size_average, config["size_average"])
self.assertAlmostEqual(crit.alpha, config["alpha"])
self.assertAlmostEqual(crit.c, config["c"])
def test_barron(self):
config = self._get_config()
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.41421353816986084)
# Alpha = 0
config = self._get_config()
config["alpha"] = 0.0
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.40546512603759766)
# Alpha = inf
config = self._get_config()
config["alpha"] = float("inf")
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.39346933364868164)
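# Where the expected values above come from (a sketch; x = output - target = -1, c = 1):
#   alpha = 1 matches Barron's general robust loss
#       (|a - 2| / a) * (((x / c)^2 / |a - 2| + 1)^(a / 2) - 1) = sqrt(2) - 1  ~= 0.414214
#   alpha = 0 matches the Cauchy-like limit   log(0.5 * (x / c)^2 + 1)         ~= 0.405465
#   alpha = inf matches the Welsch form       1 - exp(-0.5 * (x / c)^2)        ~= 0.393469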
def test_deep_copy(self):
config = self._get_config()
crit1 = build_loss(config)
self.assertTrue(isinstance(crit1, BarronLoss))
outputs = self._get_outputs()
targets = self._get_targets()
crit1(outputs, targets)
crit2 = copy.deepcopy(crit1)
self.assertAlmostEqual(
crit1(outputs, targets).item(), crit2(outputs, targets).item()
)
| ClassyVision-main | test/losses_barron_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import build_param_scheduler
from classy_vision.optim.param_scheduler.composite_scheduler import (
CompositeParamScheduler,
IntervalScaling,
UpdateInterval,
)
class TestCompositeScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_long_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "constant", "value": 0.1},
{"name": "constant", "value": 0.2},
{"name": "constant", "value": 0.3},
{"name": "constant", "value": 0.4},
],
"lengths": [0.2, 0.4, 0.1, 0.3],
}
def _get_lengths_sum_less_one_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "constant", "value": 0.1},
{"name": "constant", "value": 0.2},
],
"lengths": [0.7, 0.2999],
}
def _get_valid_mixed_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "step", "values": [0.1, 0.2, 0.3, 0.4, 0.5], "num_epochs": 10},
{"name": "cosine", "start_value": 0.42, "end_value": 0.0001},
],
"lengths": [0.5, 0.5],
}
def _get_valid_linear_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "linear", "start_value": 0.0, "end_value": 0.5},
{"name": "linear", "start_value": 0.5, "end_value": 1.0},
],
"lengths": [0.5, 0.5],
"interval_scaling": ["rescaled", "rescaled"],
}
def test_invalid_config(self):
config = self._get_valid_mixed_config()
bad_config = copy.deepcopy(config)
# No schedulers
bad_config["schedulers"] = []
bad_config["lengths"] = []
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Size of schedulers and lengths doesn't match
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["schedulers"].append(bad_config["schedulers"][-1])
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Sum of lengths < 1
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"][-1] -= 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Sum of lengths > 1
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["lengths"][-1] += 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Bad value for update_interval
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["update_interval"] = "epochs"
with self.assertRaises(Exception):
CompositeParamScheduler.from_config(bad_config)
# Bad value for composition_mode
del bad_config["update_interval"]
bad_config["interval_scaling"] = ["rescaled", "rescaleds"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
# Wrong number composition modes
del bad_config["interval_scaling"]
bad_config["interval_scaling"] = ["rescaled"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
# Missing required parameters
del bad_config["interval_scaling"]
bad_config["lengths"] = config["lengths"]
del bad_config["lengths"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
bad_config["lengths"] = config["lengths"]
del bad_config["schedulers"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
def test_long_scheduler(self):
config = self._get_valid_long_config()
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.4, 0.4]
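# Sanity check (sketch): over 10 epochs, lengths [0.2, 0.4, 0.1, 0.3] give the four
# constant schedulers epochs 0-1, 2-5, 6, and 7-9 respectively, which is exactly the
# pattern in expected_schedule above.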
self.assertEqual(schedule, expected_schedule)
def test_scheduler_lengths_within_epsilon_of_one(self):
config = self._get_lengths_sum_less_one_config()
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2]
self.assertEqual(schedule, expected_schedule)
def test_scheduler_update_interval(self):
config = self._get_valid_mixed_config()
# Check default
scheduler = CompositeParamScheduler.from_config(config)
self.assertEqual(scheduler.update_interval, UpdateInterval.STEP)
# Check step
step_config = copy.deepcopy(config)
step_config["update_interval"] = "step"
scheduler = build_param_scheduler(step_config)
self.assertEqual(scheduler.update_interval, UpdateInterval.STEP)
# Check epoch
epoch_config = copy.deepcopy(config)
epoch_config["update_interval"] = "epoch"
scheduler = build_param_scheduler(epoch_config)
self.assertEqual(scheduler.update_interval, UpdateInterval.EPOCH)
def test_build_composite_scheduler(self):
config = self._get_valid_mixed_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, CompositeParamScheduler))
schedulers = [
build_param_scheduler(scheduler_config)
for scheduler_config in config["schedulers"]
]
composite = CompositeParamScheduler(
schedulers=schedulers,
lengths=config["lengths"],
update_interval=UpdateInterval.EPOCH,
interval_scaling=[IntervalScaling.RESCALED, IntervalScaling.FIXED],
)
self.assertTrue(isinstance(composite, CompositeParamScheduler))
def test_scheduler_with_mixed_types(self):
config = self._get_valid_mixed_config()
scheduler_0 = build_param_scheduler(config["schedulers"][0])
scheduler_1 = build_param_scheduler(config["schedulers"][1])
# Check scaled
config["interval_scaling"] = ["rescaled", "rescaled"]
scheduler = CompositeParamScheduler.from_config(config)
scaled_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, self._num_epochs, 2)
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, self._num_epochs, 2)
]
self.assertEqual(scaled_schedule, expected_schedule)
# Check fixed
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, int(self._num_epochs / 2))
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(int(self._num_epochs / 2), self._num_epochs)
]
self.assertEqual(fixed_schedule, expected_schedule)
# Check that default is rescaled
del config["interval_scaling"]
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
self.assertEqual(scaled_schedule, schedule)
# Check warmup of rescaled then fixed
config["interval_scaling"] = ["rescaled", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, int(self._num_epochs), 2)
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(int(self._num_epochs / 2), self._num_epochs)
]
self.assertEqual(fixed_schedule, expected_schedule)
def test_linear_scheduler_no_gaps(self):
config = self._get_valid_linear_config()
# Check rescaled
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
self.assertEqual(expected_schedule, schedule)
# Check fixed composition gives same result as only 1 scheduler
config["schedulers"][1] = config["schedulers"][0]
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
linear_scheduler = build_param_scheduler(config["schedulers"][0])
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
linear_scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
self.assertEqual(expected_schedule, schedule)
| ClassyVision-main | test/optim_param_scheduler_composite_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.hooks import build_hook, build_hooks, ClassyHook, register_hook
@register_hook("test_hook")
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, a, b):
super().__init__()
self.state.a = a
self.state.b = b
@classmethod
def from_config(cls, config):
return cls(**config)
@register_hook("test_hook_new")
class TestHookNew(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, b, c):
super().__init__()
self.state.b = b
self.state.c = c
@classmethod
def from_config(cls, config):
return cls(**config)
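# The state-dict test below relies on ClassyHook state semantics (a hedged reading of
# the assertions, not of the implementation): attributes stored on self.state are what
# get_classy_state() serializes, and set_classy_state() restores matching attributes, so
# a newer hook keeps its own value for a field (state.c) that an older checkpoint lacks.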
class TestClassyHook(unittest.TestCase):
def test_hook_registry_and_builder(self):
config = {"name": "test_hook", "a": 1, "b": 2}
hook1 = build_hook(hook_config=config)
self.assertTrue(isinstance(hook1, TestHook))
self.assertTrue(hook1.state.a == 1)
self.assertTrue(hook1.state.b == 2)
hook_configs = [copy.deepcopy(config), copy.deepcopy(config)]
hooks = build_hooks(hook_configs=hook_configs)
for hook in hooks:
self.assertTrue(isinstance(hook, TestHook))
self.assertTrue(hook.state.a == 1)
self.assertTrue(hook.state.b == 2)
def test_state_dict(self):
a = 0
b = {1: 2, 3: [4]}
test_hook = TestHook(a, b)
state_dict = test_hook.get_classy_state()
# create a new test_hook and set its state to the old hook's.
test_hook = TestHook("", 0)
test_hook.set_classy_state(state_dict)
self.assertEqual(test_hook.state.a, a)
self.assertEqual(test_hook.state.b, b)
# make sure we're able to load old checkpoints
b_new = {1: 2}
c_new = "hello"
test_hook_new = TestHookNew(b_new, c_new)
test_hook_new.set_classy_state(state_dict)
self.assertEqual(test_hook_new.state.a, a)
self.assertEqual(test_hook_new.state.b, b)
self.assertEqual(test_hook_new.state.c, c_new)
| ClassyVision-main | test/hooks_classy_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import build_loss, LabelSmoothingCrossEntropyLoss
class TestLabelSmoothingCrossEntropyLoss(unittest.TestCase):
def test_build_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
self.assertEqual(crit._ignore_index, -1)
def test_smoothing_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
targets = torch.tensor([[0, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[0.2 / 11, 0.2 / 11, 0.2 / 11, 0.2 / 11, 10.2 / 11]]),
)
)
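# How the smoothed targets in these tests can be reproduced (a sketch; the exact
# implementation may differ): normalize the valid target row to sum to 1, then take
#   smoothed_i = (normalized_target_i + smoothing_param / num_classes) / (1 + smoothing_param)
# e.g. with smoothing_param = 0.1 and 5 classes: (1 + 0.02) / 1.1 = 10.2 / 11 for the
# positive class and (0 + 0.02) / 1.1 = 0.2 / 11 elsewhere, matching the values above.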
def test_smoothing_ignore_index_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[1 / 15, 1 / 15, 1 / 15, 1 / 15, 11 / 15]]),
)
)
def test_smoothing_multilabel_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[1.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[6 / 15, 1 / 15, 1 / 15, 1 / 15, 6 / 15]]),
)
)
def test_smoothing_all_ones_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
targets = torch.tensor([[1, 1, 1, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 4)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[1.0, 1.0, 1.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 4)
self.assertTrue(
torch.allclose(smoothed_targets, torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
)
def test_smoothing_mixed_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[1, 1, 1, 1, 1], [1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(
valid_targets,
torch.tensor([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0]]),
)
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor(
[
[0.2, 0.2, 0.2, 0.2, 0.2],
[6 / 15, 1 / 15, 1 / 15, 1 / 15, 6 / 15],
]
),
)
)
def test_smoothing_class_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([4, -1])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(
valid_targets,
torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]]),
)
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor(
[
[1 / 15, 1 / 15, 1 / 15, 1 / 15, 11 / 15],
[0.2, 0.2, 0.2, 0.2, 0.2],
]
),
)
)
def test_unnormalized_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
targets = torch.tensor([[0, 0, 0, 0, 1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.07609558, places=5)
def test_ignore_index_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.2,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[0.0, 7.0]])
targets = torch.tensor([[-1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 3.50090909)
def test_class_integer_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.2,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 2.0], [0.0, 2.0]])
targets = torch.tensor([[0], [1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 0.76176142)
def test_deep_copy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
targets = torch.tensor([[0, 0, 0, 0, 1]])
crit(outputs, targets)
crit2 = copy.deepcopy(crit)
self.assertAlmostEqual(crit2(outputs, targets).item(), 5.07609558, places=5)
| ClassyVision-main | test/losses_label_smoothing_cross_entropy_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.rmsprop_tf import RMSPropTF
from test.generic.optim_test_util import TestOptimizer
class TestRMSPropTFOptimizer(TestOptimizer, unittest.TestCase):
def _get_config(self):
return {
"name": "rmsprop_tf",
"num_epochs": 90,
"lr": 0.1,
"momentum": 0.9,
"weight_decay": 0.0001,
"alpha": 0.9,
"eps": 1e-8,
"centered": False,
}
def _instance_to_test(self):
return RMSPropTF
| ClassyVision-main | test/optim_rmsprop_tf_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import VideoAccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestVideoAccuracyMeter(ClassificationMeterTest):
def test_accuracy_meter_registry(self):
accuracy_meter = meters.build_meter(
{
"name": "video_accuracy",
"topk": [1, 2],
"clips_per_video_train": 1,
"clips_per_video_test": 2,
}
)
self.assertTrue(isinstance(accuracy_meter, VideoAccuracyMeter))
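# Background (hedged): the meter is assumed to aggregate the clip-level scores of each
# video (e.g. by averaging every clips_per_video_test consecutive rows) before computing
# top-k accuracy at the video level; the expected values in the tests below are
# consistent with that reading.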
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
# Class 0 is the correct class for video 1, class 1 for video 2, and
# class 2 for video 3
target = torch.tensor([0, 0, 1, 1, 2, 2])
# Only the first video has the top class correct; all three videos
# have the correct class in the top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_update_and_reset_test(
meter, model_output, target, expected_value, is_train=False
)
def test_double_meter_update_and_reset(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}.
# Data for two batches is provided
model_outputs = [
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
# Class 0 is the correct class for video 1, class 1 for video 2, and
# class 2 for video 3, in both batches
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
# The two batches are identical, so each has top-1 accuracy of 1/3.0 and
# top-2 accuracy of 3/3.0
expected_value = {"top_1": 2 / 6.0, "top_2": 6 / 6.0}
self.meter_update_and_reset_test(
meter, model_outputs, targets, expected_value, is_train=False
)
def test_meter_invalid_model_output(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
# Target has 3 dimensions instead of expected 1 or 2
target = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
# Target of clips from the same video is not consistent
target = torch.tensor([0, 2, 1, 1, 2, 2])
self.meter_invalid_update_test(meter, model_output, target, is_train=False)
def test_meter_invalid_topk(self):
meter = VideoAccuracyMeter(
topk=[1, 5], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
# Expected value is the expected value of meter1
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
# Class 0 is the correct class for video 1, class 1 for video 2, class 2 for video 3
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
# Value for second update
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value, is_train=False
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
]
# For both meters, class 0 is the correct class for video 1, class 1 for video 2,
# and class 2 for video 3
targets = [
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
]
# In the first two updates there is 1 correct top-1 and 4 correct in top-2 out of
# 6 videos. The same occurs in the second two updates and is added to the first
expected_values = [
{"top_1": 1 / 6.0, "top_2": 4 / 6.0}, # After one update to each meter
{"top_1": 2 / 12.0, "top_2": 8 / 12.0}, # After two updates to each meter
]
self.meter_distributed_test(
meters, model_outputs, targets, expected_values, is_train=False
)
| ClassyVision-main | test/meters_video_accuracy_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import Mock
from classy_vision.dataset import build_dataset
from classy_vision.hooks import ClassyHook
from classy_vision.losses import build_loss
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer, build_optimizer_schedulers
from classy_vision.optim.param_scheduler import (
ClassyParamScheduler,
register_param_scheduler,
UpdateInterval,
)
from classy_vision.tasks import ClassificationTask, ClassyTask
from classy_vision.trainer import LocalTrainer
@register_param_scheduler("test_scheduler_where")
class TestParamSchedulerWhere(ClassyParamScheduler):
def __init__(self):
self.update_interval = UpdateInterval.STEP
def __call__(self, where):
return where
@classmethod
def from_config(cls, cfg):
return cls()
@register_param_scheduler("test_scheduler_where_double")
class TestParamSchedulerWhereDouble(ClassyParamScheduler):
def __init__(self):
self.update_interval = UpdateInterval.EPOCH
def __call__(self, where):
return where * 2
@classmethod
def from_config(cls, cfg):
return cls()
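# "where" (hedged): the fraction of training completed so far, in [0, 1). The two toy
# schedulers above return it directly (used for lr) and doubled (used for weight_decay),
# so the integration tests below can read training progress straight off the optimizer
# options.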
class TestParamSchedulerIntegration(unittest.TestCase):
def _get_optimizer_config(self, skip_param_schedulers=False):
optimizer_config = {"name": "sgd", "num_epochs": 10, "momentum": 0.9}
if not skip_param_schedulers:
optimizer_config["param_schedulers"] = {
"lr": {"name": "test_scheduler_where"},
"weight_decay": {"name": "test_scheduler_where_double"},
}
return optimizer_config
def _get_config(self, skip_param_schedulers=False):
return {
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_image",
"split": "train",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 5,
"use_shuffle": True,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
"test": {
"name": "synthetic_image",
"split": "test",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 5,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
},
"model": {
"name": "mlp",
# 3x20x20 = 1200
"input_dim": 1200,
"output_dim": 1000,
"hidden_dims": [10],
},
"meters": {"accuracy": {"topk": [1]}},
"optimizer": self._get_optimizer_config(skip_param_schedulers),
}
def _build_task(self, num_epochs, skip_param_schedulers=False):
config = self._get_config(skip_param_schedulers)
config["optimizer"]["num_epochs"] = num_epochs
task = (
ClassificationTask()
.set_num_epochs(num_epochs)
.set_loss(build_loss(config["loss"]))
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
.set_optimizer_schedulers(build_optimizer_schedulers(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
self.assertTrue(task is not None)
return task
def test_param_scheduler_epoch(self):
task = self._build_task(num_epochs=3)
where_list = []
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
where_list.append(where)
return 0.1
mock = SchedulerMock(UpdateInterval.EPOCH)
task.set_optimizer_schedulers({"lr": mock})
trainer = LocalTrainer()
trainer.train(task)
self.assertEqual(where_list, [0, 1 / 3, 2 / 3])
def test_param_scheduler_step(self):
task = self._build_task(num_epochs=3)
where_list = []
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
where_list.append(where)
return 0.1
mock = SchedulerMock(UpdateInterval.STEP)
task.set_optimizer_schedulers({"lr": mock})
trainer = LocalTrainer()
trainer.train(task)
# We have 10 samples and the batch size is 5, so each epoch takes two steps (6 steps
# in total). The first call comes from initialization; the remaining calls happen inside step()
self.assertEqual(where_list, [0, 0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6])
def test_no_param_schedulers(self):
task = self._build_task(num_epochs=3, skip_param_schedulers=True)
# there should be no param schedulers
self.assertEqual(task.optimizer_schedulers, {})
# we should still be able to train the task
trainer = LocalTrainer()
trainer.train(task)
def test_hook(self):
task = self._build_task(num_epochs=3)
lr_list = []
weight_decay_list = []
momentum_list = []
test_instance = self
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def on_step(self, task: ClassyTask) -> None:
if not task.train:
return
# make sure we have non-zero param groups
test_instance.assertGreater(len(task.optimizer.param_groups), 0)
lr_list.append(task.optimizer.options_view.lr)
weight_decay_list.append(task.optimizer.options_view.weight_decay)
momentum_list.append(task.optimizer.options_view.momentum)
task.set_hooks([TestHook()])
trainer = LocalTrainer()
trainer.train(task)
# We have 10 samples, batch size is 5. Each epoch takes two steps. So,
# there will be a total of 6 steps.
# the lr scheduler uses a step update interval
self.assertEqual(lr_list, [0 / 6, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6])
# the weight decay scheduler uses an epoch update interval
self.assertEqual(weight_decay_list, [0 / 6, 0 / 6, 4 / 6, 4 / 6, 8 / 6, 8 / 6])
self.assertEqual(momentum_list, [0.9, 0.9, 0.9, 0.9, 0.9, 0.9])
def test_update_interval_from_config(self):
# test a config which specifies an update interval
config = {"update_interval": "epoch"}
self.assertEqual(
UpdateInterval.from_config(config, UpdateInterval.STEP),
UpdateInterval.EPOCH,
)
# test a config which doesn't specify an update interval
config = {}
self.assertEqual(
UpdateInterval.from_config(config, UpdateInterval.STEP), UpdateInterval.STEP
)
# test a config with an invalid update interval
config = {"update_interval": "invalid"}
with self.assertRaises(Exception):
UpdateInterval.from_config(config, UpdateInterval.EPOCH)
| ClassyVision-main | test/optim_param_scheduler_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import AccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestAccuracyMeter(ClassificationMeterTest):
def test_accuracy_meter_registry(self):
accuracy_meter = meters.build_meter({"name": "accuracy", "topk": [1, 2]})
self.assertTrue(isinstance(accuracy_meter, AccuracyMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = AccuracyMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_output = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
# Class 0 is the correct class for sample 1, class 1 for sample 2, etc
target = torch.tensor([0, 1, 2])
# Only the first sample has top class correct, first and third
# sample have correct class in top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_double_meter_update_and_reset(self):
meter = AccuracyMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score...two batches in this test
model_outputs = [
torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),
torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]),
]
# Class 0 is the correct class for sample 1, class 1 for
# sample 2, etc, in both batches
targets = [torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2])]
# First batch has top-1 accuracy of 1/3.0, top-2 accuracy of 2/3.0
# Second batch has top-1 accuracy of 2/3.0, top-2 accuracy of 3/3.0
expected_value = {"top_1": 3 / 6.0, "top_2": 5 / 6.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_single_meter_update_and_reset_onehot(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update with onehot target.
"""
meter = AccuracyMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_output = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
# Class 0 is the correct class for sample 1, class 1 for sample 2, etc
target = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# Only the first sample has top class correct, first and third
# sample have correct class in top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_single_meter_update_and_reset_multilabel(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update with multilabel target.
"""
meter = AccuracyMeter(topk=[1, 2])
# Batchsize = 7, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_output = torch.tensor(
[
[3, 2, 1],
[3, 1, 2],
[1, 3, 2],
[1, 2, 3],
[2, 1, 3],
[2, 3, 1],
[1, 3, 2],
]
)
target = torch.tensor(
[
[1, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[1, 0, 1],
]
)
# 1st, 4th, 5th, 6th sample has top class correct, 2nd and 7th have at least
# one correct class in top 2.
expected_value = {"top_1": 4 / 7.0, "top_2": 6 / 7.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_meter_invalid_model_output(self):
meter = AccuracyMeter(topk=[1, 2])
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = AccuracyMeter(topk=[1, 2])
model_output = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
# Target has 3 dimensions instead of expected 1 or 2
target = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_topk(self):
meter = AccuracyMeter(topk=[1, 5])
model_output = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
# Expected value is the expected value of meter1
meters = [AccuracyMeter(topk=[1, 2]), AccuracyMeter(topk=[1, 2])]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor([[1, 2, 3], [1, 2, 3], [2, 3, 1]]),
torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),
]
        # Class 0 is the correct class for sample 1, class 1 for sample 2, etc
targets = [torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2])]
# Value for second update
expected_value = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [AccuracyMeter(topk=[1, 2]), AccuracyMeter(topk=[1, 2])]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]), # Meter 0
torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]), # Meter 1
torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]), # Meter 0
torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]), # Meter 1
]
        # Class 0 is the correct class for sample 1, class 1 for sample 2, etc
targets = [
torch.tensor([0, 1, 2]), # Meter 0
torch.tensor([0, 1, 2]), # Meter 1
torch.tensor([0, 1, 2]), # Meter 0
torch.tensor([0, 1, 2]), # Meter 1
]
        # In the first two updates there are 3 correct in top-1 and 5 correct in top-2.
        # The same occurs in the second two updates and is added to the first.
expected_values = [
{"top_1": 3 / 6.0, "top_2": 5 / 6.0}, # After one update to each meter
{"top_1": 6 / 12.0, "top_2": 10 / 12.0}, # After two updates to each meter
]
self.meter_distributed_test(meters, model_outputs, targets, expected_values)
| ClassyVision-main | test/meters_accuracy_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from functools import partial
import torch
from classy_vision.generic.util import get_torch_version
from classy_vision.heads import build_head
from classy_vision.heads.fully_connected_head import FullyConnectedHead
from test.generic.utils import ClassyTestCase
class TestFullyConnectedHead(ClassyTestCase):
def test_fully_connected_head(self):
batch_size = 2
in_plane = 3
image_size = 4
num_classes = 5
head = FullyConnectedHead(
"default_head",
num_classes=num_classes,
in_plane=in_plane,
)
input = torch.rand([batch_size, in_plane, image_size, image_size])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, num_classes]))
def test_fully_connected_head_normalize_inputs(self):
batch_size = 2
in_plane = 3
image_size = 4
head = FullyConnectedHead(
"default_head",
in_plane=in_plane,
normalize_inputs="l2",
num_classes=None,
)
input = torch.rand([batch_size, in_plane, image_size, image_size])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, in_plane]))
for i in range(batch_size):
output_i = output[i]
self.assertAlmostEqual(output_i.norm().item(), 1, delta=1e-5)
# test that the grads will be the same when using normalization as when
# normalizing an input and passing it to the head without normalize_inputs.
# use input with a norm > 1 and make image_size = 1 so that average
# pooling is a no op
image_size = 1
input = 2 + torch.rand([batch_size, in_plane, image_size, image_size])
norm_func = (
torch.linalg.norm
if get_torch_version() >= [1, 7]
else partial(torch.norm, p=2)
)
norms = norm_func(input.view(batch_size, -1), dim=1)
normalized_input = torch.clone(input)
for i in range(batch_size):
normalized_input[i] /= norms[i]
num_classes = 10
head_norm = FullyConnectedHead(
"default_head",
in_plane=in_plane,
normalize_inputs="l2",
num_classes=num_classes,
)
head_no_norm = FullyConnectedHead(
"default_head",
in_plane=in_plane,
num_classes=num_classes,
)
# share the weights between the heads
head_norm.load_state_dict(copy.deepcopy(head_no_norm.state_dict()))
# use the sum of the output as the loss and perform a backward
head_no_norm(normalized_input).sum().backward()
head_norm(input).sum().backward()
for param_1, param_2 in zip(head_norm.parameters(), head_no_norm.parameters()):
self.assertTorchAllClose(param_1, param_2)
self.assertTorchAllClose(param_1.grad, param_2.grad)
def test_conv_planes(self):
num_classes = 10
in_plane = 3
conv_planes = 5
batch_size = 2
image_size = 4
head_config = {
"name": "fully_connected",
"unique_id": "asd",
"in_plane": in_plane,
"conv_planes": conv_planes,
"num_classes": num_classes,
}
head = build_head(head_config)
self.assertIsInstance(head, FullyConnectedHead)
# specify an activation
head_config["activation"] = "relu"
head = build_head(head_config)
# make sure that the head runs and returns the expected dimensions
input = torch.rand([batch_size, in_plane, image_size, image_size])
output = head(input)
self.assertEqual(output.shape, (batch_size, num_classes))
| ClassyVision-main | test/heads_fully_connected_head_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.optim.param_scheduler import LinearParamScheduler
from classy_vision.optim.sgd import SGD
from test.generic.optim_test_util import TestOptimizer
class TestSGDOptimizer(TestOptimizer, unittest.TestCase):
def _get_config(self):
return {
"name": "sgd",
"num_epochs": 90,
"lr": 0.1,
"momentum": 0.9,
"weight_decay": 0.0001,
"nesterov": False,
}
def _instance_to_test(self):
return SGD
# This test relies on the SGD update equations, which is why it's not in
# the base class TestOptimizer
def test_lr_step(self):
opt = SGD()
param = torch.tensor([0.0], requires_grad=True)
opt.set_param_groups([param], lr=LinearParamScheduler(1, 2))
param.grad = torch.tensor([1.0])
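        # The expected values below follow the plain SGD update
        # param <- param - lr(where) * grad, with lr(where) taken from the
        # LinearParamScheduler (1 -> 2) and no momentum applied by default here.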
self.assertAlmostEqual(opt.options_view.lr, 1.0)
# lr=1, param should go from 0 to -1
opt.step(where=0)
self.assertAlmostEqual(opt.options_view.lr, 1.0)
self.assertAlmostEqual(param.item(), -1.0, delta=1e-5)
# lr=1.5, param should go from -1 to -1-1.5 = -2.5
opt.step(where=0.5)
self.assertAlmostEqual(param.item(), -2.5, delta=1e-5)
# lr=1.9, param should go from -2.5 to -1.9-2.5 = -4.4
opt.step(where=0.9)
self.assertAlmostEqual(param.item(), -4.4, delta=1e-5)
| ClassyVision-main | test/optim_sgd_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import shutil
import tempfile
import unittest
import torch
import torch.nn as nn
from classy_vision.dataset import build_dataset
from classy_vision.generic.distributed_util import is_distributed_training_run
from classy_vision.generic.util import get_checkpoint_dict, get_torch_version
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook
from classy_vision.losses import build_loss, ClassyLoss, register_loss
from classy_vision.models import build_model, ClassyModel
from classy_vision.optim import build_optimizer, SGD
from classy_vision.tasks import build_task, ClassificationTask
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from test.generic.utils import (
compare_model_state,
compare_samples,
compare_states,
LimitedPhaseTrainer,
)
@register_loss("test_stateful_loss")
class TestStatefulLoss(ClassyLoss):
def __init__(self, in_plane):
super(TestStatefulLoss, self).__init__()
self.alpha = torch.nn.Parameter(torch.Tensor(in_plane, 2))
        torch.nn.init.xavier_normal_(self.alpha)
@classmethod
def from_config(cls, config) -> "TestStatefulLoss":
return cls(in_plane=config["in_plane"])
def forward(self, output, target):
value = output.matmul(self.alpha)
loss = torch.mean(torch.abs(value))
return loss
# Generate a simple model that has a very high gradient w.r.t. to this
# loss
class SimpleModel(ClassyModel):
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.tensor(5.0), requires_grad=True)
def forward(self, x):
return x + self.param
@classmethod
def from_config(cls):
return cls()
class SimpleLoss(nn.Module):
def forward(self, x, y):
return x.pow(2).mean()
class TestClassificationTask(unittest.TestCase):
def _compare_model_state(self, model_state_1, model_state_2, check_heads=True):
compare_model_state(self, model_state_1, model_state_2, check_heads)
def _compare_samples(self, sample_1, sample_2):
compare_samples(self, sample_1, sample_2)
def _compare_states(self, state_1, state_2, check_heads=True):
compare_states(self, state_1, state_2)
def setUp(self):
# create a base directory to write checkpoints to
self.base_dir = tempfile.mkdtemp()
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def test_build_task(self):
config = get_test_task_config()
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
def test_hooks_config_builds_correctly(self):
config = get_test_task_config()
config["hooks"] = [{"name": "loss_lr_meter_logging"}]
task = build_task(config)
self.assertTrue(len(task.hooks) == 1)
self.assertTrue(isinstance(task.hooks[0], LossLrMeterLoggingHook))
def test_get_state(self):
config = get_test_task_config()
loss = build_loss(config["loss"])
task = (
ClassificationTask()
.set_num_epochs(1)
.set_loss(loss)
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
task.prepare()
task = build_task(config)
task.prepare()
def test_synchronize_losses_non_distributed(self):
"""
Tests that synchronize losses has no side effects in a non-distributed setting.
"""
test_config = get_fast_test_task_config()
task = build_task(test_config)
task.prepare()
old_losses = copy.deepcopy(task.losses)
task.synchronize_losses()
self.assertEqual(old_losses, task.losses)
def test_synchronize_losses_when_losses_empty(self):
config = get_fast_test_task_config()
task = build_task(config)
task.prepare()
task.set_use_gpu(torch.cuda.is_available())
# Losses should be empty when creating task
self.assertEqual(len(task.losses), 0)
task.synchronize_losses()
def test_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the train_steps
run the same way after loading from a checkpoint.
"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task_2 = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task.set_use_gpu(torch.cuda.is_available())
# only train 1 phase at a time
trainer = LimitedPhaseTrainer(num_phases=1)
while not task.done_training():
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
# task 2 should have the same state before training
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
# train for one phase
trainer.train(task)
trainer.train(task_2)
# task 2 should have the same state after training
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
def test_final_train_checkpoint(self):
"""Test that a train phase checkpoint with a where of 1.0 can be loaded"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks(
[CheckpointHook(self.base_dir, {}, phase_types=["train"])]
)
task_2 = build_task(config)
task.set_use_gpu(torch.cuda.is_available())
trainer = LocalTrainer()
trainer.train(task)
self.assertAlmostEqual(task.where, 1.0, delta=1e-3)
# set task_2's state as task's final train checkpoint
task_2.set_checkpoint(self.base_dir)
task_2.prepare()
# we should be able to train the task
trainer.train(task_2)
def test_test_only_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the
train_steps run the same way after loading from a training
task checkpoint on a test_only task.
"""
train_config = get_fast_test_task_config()
train_config["num_epochs"] = 10
test_config = get_fast_test_task_config()
test_config["test_only"] = True
train_task = build_task(train_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
# prepare the tasks for the right device
train_task.prepare()
# test in both train and test mode
trainer = LocalTrainer()
trainer.train(train_task)
# set task's state as task_2's checkpoint
test_only_task._set_checkpoint_dict(
get_checkpoint_dict(train_task, {}, deep_copy=True)
)
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect the phase idx to be different for a test only task
self.assertEqual(test_state["phase_idx"], -1)
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
        # train_phase_idx should be -1
self.assertEqual(test_state["train_phase_idx"], -1)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_test_only_task(self):
"""
Tests the task in test mode by running train_steps
to make sure the train_steps run as expected on a
test_only task
"""
test_config = get_fast_test_task_config()
test_config["test_only"] = True
# delete train dataset
del test_config["dataset"]["train"]
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_train_only_task(self):
"""
Tests that the task runs when only a train dataset is specified.
"""
test_config = get_fast_test_task_config()
# delete the test dataset from the config
del test_config["dataset"]["test"]
task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
task.prepare()
        # verify that the task can still be trained
trainer = LocalTrainer()
trainer.train(task)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_checkpointing_different_device(self):
config = get_fast_test_task_config()
task = build_task(config)
task_2 = build_task(config)
for use_gpu in [True, False]:
task.set_use_gpu(use_gpu)
task.prepare()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
# we should be able to run the trainer using state from a different device
trainer = LocalTrainer()
task_2.set_use_gpu(not use_gpu)
trainer.train(task_2)
@unittest.skipUnless(
is_distributed_training_run(), "This test needs a distributed run"
)
def test_get_classy_state_on_loss(self):
config = get_fast_test_task_config()
config["loss"] = {"name": "test_stateful_loss", "in_plane": 256}
task = build_task(config)
task.prepare()
self.assertIn("alpha", task.get_classy_state()["loss"])
def test_gradient_clipping(self):
apex_available = True
try:
import apex # noqa F401
except ImportError:
apex_available = False
def train_with_clipped_gradients(amp_args=None):
task = build_task(get_fast_test_task_config())
task.set_num_epochs(1)
task.set_model(SimpleModel())
task.set_loss(SimpleLoss())
task.set_meters([])
task.set_use_gpu(torch.cuda.is_available())
task.set_clip_grad_norm(0.5)
task.set_amp_args(amp_args)
task.set_optimizer(SGD(lr=1))
trainer = LocalTrainer()
trainer.train(task)
return task.model.param.grad.norm()
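        # SimpleModel's parameter starts at 5.0 and SimpleLoss is a mean of squares,
        # so the raw gradient norm should comfortably exceed the 0.5 clipping
        # threshold; the gradient left on the parameter after the final step is
        # therefore expected to have norm ~0.5, which is what is asserted below.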
grad_norm = train_with_clipped_gradients(None)
self.assertAlmostEqual(grad_norm, 0.5, delta=1e-2)
if apex_available and torch.cuda.is_available():
grad_norm = train_with_clipped_gradients({"opt_level": "O2"})
self.assertAlmostEqual(grad_norm, 0.5, delta=1e-2)
def test_clip_stateful_loss(self):
config = get_fast_test_task_config()
config["loss"] = {"name": "test_stateful_loss", "in_plane": 256}
config["grad_norm_clip"] = grad_norm_clip = 1
task = build_task(config)
task.set_use_gpu(False)
task.prepare()
# set fake gradients with norm > grad_norm_clip
for param in itertools.chain(
task.base_model.parameters(), task.base_loss.parameters()
):
param.grad = 1.1 + torch.rand(param.shape)
self.assertGreater(param.grad.norm(), grad_norm_clip)
task._clip_gradients(grad_norm_clip)
for param in itertools.chain(
task.base_model.parameters(), task.base_loss.parameters()
):
self.assertLessEqual(param.grad.norm(), grad_norm_clip)
# helper used by gradient accumulation tests
def train_with_batch(self, simulated_bs, actual_bs, clip_grad_norm=None):
config = copy.deepcopy(get_fast_test_task_config())
config["dataset"]["train"]["num_samples"] = 12
config["dataset"]["train"]["batchsize_per_replica"] = actual_bs
del config["dataset"]["test"]
task = build_task(config)
task.set_num_epochs(1)
task.set_model(SimpleModel())
task.set_loss(SimpleLoss())
task.set_meters([])
task.set_use_gpu(torch.cuda.is_available())
if simulated_bs is not None:
task.set_simulated_global_batchsize(simulated_bs)
if clip_grad_norm is not None:
task.set_clip_grad_norm(clip_grad_norm)
task.set_optimizer(SGD(lr=1))
trainer = LocalTrainer()
trainer.train(task)
return task.model.param
def test_gradient_accumulation(self):
param_with_accumulation = self.train_with_batch(simulated_bs=4, actual_bs=2)
param = self.train_with_batch(simulated_bs=4, actual_bs=4)
self.assertAlmostEqual(param_with_accumulation, param, delta=1e-5)
def test_gradient_accumulation_and_clipping(self):
param = self.train_with_batch(simulated_bs=6, actual_bs=2, clip_grad_norm=0.1)
        # param starts at 5 and has to decrease; LR = 1
# clipping the grad to 0.1 means we drop 0.1 per update. num_samples =
# 12 and the simulated batch size is 6, so we should do 2 updates: 5 ->
# 4.9 -> 4.8
self.assertAlmostEqual(param, 4.8, delta=1e-5)
@unittest.skipIf(
get_torch_version() < [1, 8],
"FP16 Grad compression is only available from PyTorch 1.8",
)
def test_fp16_grad_compression(self):
# there is no API defined to check that a DDP hook has been enabled, so we just
# test that we set the right variables
config = copy.deepcopy(get_fast_test_task_config())
task = build_task(config)
self.assertFalse(task.fp16_grad_compress)
config.setdefault("distributed", {})
config["distributed"]["fp16_grad_compress"] = True
task = build_task(config)
self.assertTrue(task.fp16_grad_compress)
| ClassyVision-main | test/tasks_classification_task_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
StepParamScheduler,
)
class TestStepScheduler(unittest.TestCase):
_num_epochs = 12
def _get_valid_config(self):
return {
"name": "step",
"num_epochs": self._num_epochs,
"values": [0.1, 0.01, 0.001, 0.0001],
}
def test_invalid_config(self):
# Invalid num epochs
config = self._get_valid_config()
bad_config = copy.deepcopy(config)
bad_config["num_epochs"] = -1
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
# Invalid Values
bad_config["num_epochs"] = config["num_epochs"]
del bad_config["values"]
with self.assertRaises(TypeError):
StepParamScheduler.from_config(bad_config)
bad_config["values"] = {"a": "b"}
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
bad_config["values"] = []
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
scheduler = StepParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
0.1,
0.1,
0.1,
0.01,
0.01,
0.01,
0.001,
0.001,
0.001,
0.0001,
0.0001,
0.0001,
]
self.assertEqual(schedule, expected_schedule)
def test_build_step_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, StepParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_step_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
import torchvision.transforms._transforms_video as transforms_video
from classy_vision.dataset.core.random_video_datasets import RandomVideoDataset
from classy_vision.dataset.transforms.util_video import (
build_video_field_transform_default,
VideoConstants,
)
class DatasetTransformUtilVideoTest(unittest.TestCase):
def get_test_video_dataset(self):
self.num_classes = 10
self.split = "train"
self.num_samples = 100
self.frames_per_clip = 32
self.video_width = 320
self.video_height = 256
self.audio_samples = 44000
self.clips_per_video = 1
self.seed = 1
dataset = RandomVideoDataset(
self.num_classes,
self.split,
self.num_samples,
self.frames_per_clip,
self.video_width,
self.video_height,
self.audio_samples,
self.clips_per_video,
self.seed,
)
return dataset
def test_build_field_transform_default_video(self):
dataset = self.get_test_video_dataset()
# transform config is not provided. Use default transforms
config = None
# default training data transform
sample = dataset[0]
transform = build_video_field_transform_default(config, "train")
output_clip = transform(sample)["input"]["video"]
self.assertEqual(
output_clip.size(),
torch.Size(
(
3,
self.frames_per_clip,
VideoConstants.CROP_SIZE,
VideoConstants.CROP_SIZE,
)
),
)
# default testing data transform
sample = dataset[1]
sample_copy = copy.deepcopy(sample)
expected_output_clip = transforms_video.ToTensorVideo()(
sample["input"]["video"]
)
expected_output_clip = transforms_video.CenterCropVideo(
VideoConstants.CROP_SIZE
)(expected_output_clip)
expected_output_clip = transforms_video.NormalizeVideo(
mean=VideoConstants.MEAN, std=VideoConstants.STD
)(expected_output_clip)
transform = build_video_field_transform_default(config, "test")
output_clip = transform(sample_copy)["input"]["video"]
rescaled_width = int(
VideoConstants.SIZE_RANGE[0] * self.video_width / self.video_height
)
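        # The default test transform is assumed to rescale the clip so that its
        # shorter side (the 256-pixel height here) becomes SIZE_RANGE[0] while
        # preserving the aspect ratio, which is what rescaled_width above encodes.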
self.assertEqual(
output_clip.size(),
torch.Size(
(3, self.frames_per_clip, VideoConstants.SIZE_RANGE[0], rescaled_width)
),
)
# transform config is provided. Simulate training config
sample = dataset[2]
config = {
"video": [
{"name": "ToTensorVideo"},
{
"name": "video_clip_random_resize_crop",
"crop_size": 64,
"size_range": [256, 320],
},
{"name": "RandomHorizontalFlipVideo"},
{
"name": "NormalizeVideo",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
]
}
transform = build_video_field_transform_default(config, "train")
output_clip = transform(sample)["input"]["video"]
self.assertEqual(
output_clip.size(), torch.Size((3, self.frames_per_clip, 64, 64))
)
self.assertTrue(output_clip.dtype == torch.float)
# transform config is provided. Simulate testing config
sample = dataset[3]
config = {
"video": [
{"name": "ToTensorVideo"},
{"name": "video_clip_resize", "size": 64},
{
"name": "NormalizeVideo",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
]
}
transform = build_video_field_transform_default(config, "train")
output_clip = transform(sample)["input"]["video"]
rescaled_width = int(64 * self.video_width / self.video_height)
self.assertEqual(
output_clip.size(),
torch.Size((3, self.frames_per_clip, 64, rescaled_width)),
)
self.assertTrue(output_clip.dtype == torch.float)
| ClassyVision-main | test/dataset_transforms_util_video_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
MultiStepParamScheduler,
)
class TestMultiStepParamScheduler(unittest.TestCase):
_num_epochs = 12
def _get_valid_config(self):
return {
"name": "multistep",
"num_epochs": self._num_epochs,
"values": [0.1, 0.01, 0.001, 0.0001],
"milestones": [4, 6, 8],
}
def test_invalid_config(self):
# Invalid num epochs
config = self._get_valid_config()
bad_config = copy.deepcopy(config)
bad_config["num_epochs"] = -1
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
# Invalid values
bad_config["num_epochs"] = config["num_epochs"]
del bad_config["values"]
with self.assertRaises((AssertionError, TypeError)):
MultiStepParamScheduler.from_config(bad_config)
bad_config["values"] = {"a": "b"}
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
bad_config["values"] = []
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
        # Invalid milestones
bad_config["values"] = config["values"]
bad_config["milestones"] = {"a": "b"}
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
# Too many
bad_config["milestones"] = [3, 6, 8, 12]
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
# Too few
bad_config["milestones"] = [3, 6]
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
# Exceeds num_epochs
bad_config["milestones"] = [3, 6, 12]
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
# Out of order
bad_config["milestones"] = [3, 8, 6]
with self.assertRaises(ValueError):
MultiStepParamScheduler.from_config(bad_config)
def _test_config_scheduler(self, config, expected_schedule):
scheduler = MultiStepParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
self.assertEqual(schedule, expected_schedule)
def test_scheduler(self):
config = self._get_valid_config()
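        # With num_epochs = 12, values [0.1, 0.01, 0.001, 0.0001] and milestones
        # [4, 6, 8], the value is expected to drop at the start of epochs 4, 6 and 8,
        # which produces the schedule below.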
expected_schedule = [
0.1,
0.1,
0.1,
0.1,
0.01,
0.01,
0.001,
0.001,
0.0001,
0.0001,
0.0001,
0.0001,
]
self._test_config_scheduler(config, expected_schedule)
def test_default_config(self):
config = self._get_valid_config()
default_config = copy.deepcopy(config)
        # Default equispaced milestones behavior
del default_config["milestones"]
expected_schedule = [
0.1,
0.1,
0.1,
0.01,
0.01,
0.01,
0.001,
0.001,
0.001,
0.0001,
0.0001,
0.0001,
]
self._test_config_scheduler(default_config, expected_schedule)
def test_build_non_equi_step_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, MultiStepParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_multi_step_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import unittest
import torch
import torch.nn as nn
from classy_vision.generic.util import get_torch_version
from classy_vision.models import build_model, RegNet
from parameterized import parameterized
# Test the different exposed parameters, even those not present in the
# actual checked-in configurations
REGNET_TEST_CONFIGS = [
(
{
# RegNetY
"name": "regnet",
"bn_epsilon": 1e-05, # optional
"bn_momentum": 0.1, # optional
"stem_type": "simple_stem_in", # optional
"stem_width": 32, # optional
"block_type": "res_bottleneck_block", # optional
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"use_se": True, # optional
"se_ratio": 0.25, # optional
},
),
(
{
# RegNetX-like (no se)
"name": "regnet",
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"use_se": False, # optional
},
),
(
{
# RegNetY, different block
"name": "regnet",
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"block_type": "vanilla_block", # optional
},
),
(
{
# RegNetY, different block
"name": "regnet",
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"block_type": "res_basic_block", # optional
},
),
(
{
# RegNetY, different stem
"name": "regnet",
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"stem_type": "res_stem_cifar", # optional
},
),
(
{
# RegNetY, different stem
"name": "regnet",
"depth": 22,
"w_0": 24,
"w_a": 24.48,
"w_m": 2.54,
"group_width": 16,
"stem_type": "res_stem_in", # optional
},
),
(
{
# Default minimal param set
"name": "regnet",
"depth": 17,
"w_0": 192,
"w_a": 76.82,
"w_m": 2.19,
"group_width": 56,
},
),
(
{
# RegNetZ
"name": "regnet",
"block_type": "res_bottleneck_linear_block",
"depth": 21,
"w_0": 16,
"w_a": 10.7,
"w_m": 2.51,
"group_width": 4,
"bot_mul": 4.0,
"activation": "silu",
},
),
]
REGNET_TEST_PRESET_NAMES = [
"regnet_y_400mf",
"regnet_y_800mf",
"regnet_y_1.6gf",
"regnet_y_3.2gf",
"regnet_y_8gf",
"regnet_y_16gf",
"regnet_y_32gf",
"regnet_y_64gf",
"regnet_y_128gf",
"regnet_y_256gf",
"regnet_x_400mf",
"regnet_x_800mf",
"regnet_x_1.6gf",
"regnet_x_3.2gf",
"regnet_x_8gf",
"regnet_x_16gf",
"regnet_x_32gf",
"regnet_z_500mf",
"regnet_z_4gf",
]
REGNET_TEST_PRESETS = [({"name": n},) for n in REGNET_TEST_PRESET_NAMES]
class TestRegNetModelBuild(unittest.TestCase):
@parameterized.expand(REGNET_TEST_CONFIGS + REGNET_TEST_PRESETS)
def test_build_model(self, config):
"""
        Test that the model builds from either an explicit parameter config or a
        preset model name.
"""
if get_torch_version() < [1, 7] and (
"regnet_z" in config["name"] or config.get("activation", "relu") == "silu"
):
self.skipTest("SiLU activation is only supported since PyTorch 1.7")
model = build_model(config)
assert isinstance(model, RegNet)
@parameterized.expand(REGNET_TEST_CONFIGS + REGNET_TEST_PRESETS)
def test_quantize_model(self, config):
"""
        Test that the model builds from either an explicit parameter config or a
        preset model name, and that the FX graph mode quantization APIs run on its stem.
"""
if get_torch_version() < [1, 13]:
self.skipTest(
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13"
)
import torch.ao.quantization as tq
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
model = build_model(config)
assert isinstance(model, RegNet)
model.eval()
example_inputs = (torch.rand(1, 3, 3, 3),)
model.stem = prepare_fx(model.stem, {"": tq.default_qconfig}, example_inputs)
model.stem = convert_fx(model.stem)
class TestRegNetModelFW(unittest.TestCase):
@parameterized.expand(
[({"name": n},) for n in ["regnet_y_400mf", "regnet_x_400mf"]]
)
def test_model_forward(self, config):
"""
        Test that a default forward pass succeeds and produces an output of the
        expected shape.
"""
image_shape = (3, 224, 224)
num_images = (10,)
input_tensor = torch.randn(num_images + image_shape)
model = build_model(config)
output = model.forward(input_tensor)
        # Check that the input actually went through a forward pass and was not
        # simply passed back unchanged
logging.info(f"Model {config}: output shape {output.shape}")
assert output.shape[0] == num_images[0]
# Default presets output 7x7 feature maps for 224x224 inputs
assert output.shape[-1] == 7
assert output.shape[-2] == 7
class TestRegNet(unittest.TestCase):
def _compare_models(self, model_1, model_2, expect_same: bool):
if expect_same:
self.assertMultiLineEqual(repr(model_1), repr(model_2))
else:
self.assertNotEqual(repr(model_1), repr(model_2))
def swap_relu_with_silu(self, module):
for child_name, child in module.named_children():
if isinstance(child, nn.ReLU):
setattr(module, child_name, nn.SiLU())
else:
self.swap_relu_with_silu(child)
def _check_no_module_cls_in_model(self, module_cls, model):
for module in model.modules():
self.assertNotIsInstance(module, module_cls)
@unittest.skipIf(
get_torch_version() < [1, 7],
"SiLU activation is only supported since PyTorch 1.7",
)
def test_activation(self):
config = REGNET_TEST_CONFIGS[0][0]
model_default = build_model(config)
config = copy.deepcopy(config)
config["activation"] = "relu"
model_relu = build_model(config)
# both models should be the same
self._compare_models(model_default, model_relu, expect_same=True)
# we don't expect any silus in the model
self._check_no_module_cls_in_model(nn.SiLU, model_relu)
config["activation"] = "silu"
model_silu = build_model(config)
# the models should be different
self._compare_models(model_silu, model_relu, expect_same=False)
# swap out all relus with silus
self.swap_relu_with_silu(model_relu)
print(model_relu)
# both models should be the same
self._compare_models(model_relu, model_silu, expect_same=True)
# we don't expect any relus in the model
self._check_no_module_cls_in_model(nn.ReLU, model_relu)
| ClassyVision-main | test/models_regnet_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.dataset import build_dataset, register_dataset
from classy_vision.dataset.classy_video_dataset import (
ClassyVideoDataset,
MaxLengthClipSampler,
)
from classy_vision.dataset.core import ListDataset
from classy_vision.dataset.transforms.util_video import (
build_video_field_transform_default,
)
from torch.utils.data import Sampler
DUMMY_SAMPLES_1 = [
{
"input": {
"video": torch.randint(0, 256, (8, 3, 128, 128), dtype=torch.uint8),
"audio": torch.rand(1000, 1, dtype=torch.float32),
},
"target": torch.tensor([[0]]),
}
]
DUMMY_CONFIG = {
"name": "test_video_dataset",
"split": "train",
"batchsize_per_replica": 1,
"use_shuffle": True,
"num_samples": 1,
"frames_per_clip": 8,
"video_dir": "dummy_video_dir",
}
class MockClipSampler(Sampler):
def __init__(self, full_size=1000):
self.full_size = full_size
def __iter__(self):
indices = list(range(self.full_size))
return iter(indices)
def __len__(self):
return self.full_size
@register_dataset("test_video_dataset")
class TestVideoDataset(ClassyVideoDataset):
"""Test dataset for validating registry functions"""
def __init__(
self,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
samples,
):
self.samples = samples
input_tensors = [sample["input"] for sample in samples]
target_tensors = [sample["target"] for sample in samples]
dataset = ListDataset(input_tensors, target_tensors, loader=lambda x: x)
super(TestVideoDataset, self).__init__(
dataset,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
)
@classmethod
def from_config(cls, config, samples):
split = config.get("split")
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = cls.parse_config(config)
transform = build_video_field_transform_default(transform_config, split)
return cls(
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
samples,
)
class TestRegistryFunctions(unittest.TestCase):
"""
Tests functions that use registry
"""
def test_build_dataset(self):
dataset = build_dataset(DUMMY_CONFIG, DUMMY_SAMPLES_1)
self.assertTrue(isinstance(dataset, TestVideoDataset))
class TestClassyVideoDataset(unittest.TestCase):
"""
Tests member functions of ClassyVideoDataset.
"""
def setUp(self):
self.dataset = build_dataset(DUMMY_CONFIG, DUMMY_SAMPLES_1)
def test_parse_config(self):
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = self.dataset.parse_config(DUMMY_CONFIG)
def test_max_length_clip_sampler(self):
clip_sampler = MockClipSampler(full_size=1000)
clip_sampler = MaxLengthClipSampler(clip_sampler, num_samples=64)
count = 0
for _clip_index in iter(clip_sampler):
count += 1
self.assertEqual(count, 64)
self.assertEqual(len(clip_sampler), 64)
| ClassyVision-main | test/dataset_classy_video_dataset_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import os
import shutil
import tempfile
import torch
from classy_vision.hooks import OutputCSVHook
from classy_vision.tasks import build_task
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config
from test.generic.hook_test_utils import HookTestBase
def parse_csv(file_path):
"""Parses the csv file and returns number of rows"""
num_rows = 0
with open(file_path, "r", newline="") as csvfile:
reader = csv.DictReader(csvfile, delimiter="\t")
for _ in reader:
num_rows += 1
return num_rows
class TestCSVHook(HookTestBase):
def setUp(self) -> None:
self.base_dir = tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.base_dir)
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
folder = f"{self.base_dir}/constructor_test/"
os.makedirs(folder)
self.constructor_test_helper(
config={"folder": folder},
hook_type=OutputCSVHook,
hook_registry_name="output_csv",
invalid_configs=[],
)
def test_train(self) -> None:
for use_gpu in {False, torch.cuda.is_available()}:
folder = f"{self.base_dir}/train_test/{use_gpu}"
os.makedirs(folder)
task = build_task(get_fast_test_task_config(head_num_classes=2))
csv_hook = OutputCSVHook(folder)
task.set_hooks([csv_hook])
task.set_use_gpu(use_gpu)
trainer = LocalTrainer()
trainer.train(task)
self.assertEqual(parse_csv(csv_hook.output_path), 10)
| ClassyVision-main | test/hooks_output_csv_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
PolynomialDecayParamScheduler,
)
class TestPolynomialScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_config(self):
return {
"name": "polynomial",
"num_epochs": self._num_epochs,
"base_value": 0.1,
"power": 1,
}
def test_invalid_config(self):
        config = self._get_valid_config()
# Invalid Base lr
bad_config = copy.deepcopy(config)
del bad_config["base_value"]
with self.assertRaises((AssertionError, TypeError)):
PolynomialDecayParamScheduler.from_config(bad_config)
# Invalid Power
bad_config = copy.deepcopy(config)
del bad_config["power"]
with self.assertRaises((AssertionError, TypeError)):
PolynomialDecayParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
scheduler = PolynomialDecayParamScheduler.from_config(config)
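        # A sketch of the expected behavior, assuming the scheduler evaluates
        # base_value * (1 - where) ** power: with power = 1 the value decays
        # linearly from 0.1 at where = 0 down to 0.01 at where = 0.9.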
schedule = [
round(scheduler(epoch_num / self._num_epochs), 2)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.09, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
self.assertEqual(schedule, expected_schedule)
def test_build_polynomial_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, PolynomialDecayParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_polynomial_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.losses import (
build_loss,
ClassyLoss,
MultiOutputSumLoss,
register_loss,
)
@register_loss("mock_1")
class MockLoss1(ClassyLoss):
def forward(self, pred, target):
return torch.tensor(1.0)
@classmethod
def from_config(cls, config):
return cls()
class TestMultiOutputSumLoss(unittest.TestCase):
def test_multi_output_sum_loss(self):
config = {"name": "multi_output_sum_loss", "loss": {"name": "mock_1"}}
crit = build_loss(config)
self.assertTrue(isinstance(crit, MultiOutputSumLoss))
# test with a single output
output = torch.tensor([1.0, 2.3])
target = torch.tensor(1.0)
self.assertAlmostEqual(crit(output, target).item(), 1.0)
# test with a list of outputs
output = [torch.tensor([1.2, 3.2])] * 5
target = torch.tensor(2.3)
self.assertAlmostEqual(crit(output, target).item(), 5.0)
| ClassyVision-main | test/losses_multi_output_sum_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from typing import Any, Dict
from unittest import mock
import torch
import torch.nn as nn
from classy_vision.generic.util import get_checkpoint_dict
from classy_vision.losses import ClassyLoss, register_loss
from classy_vision.tasks import build_task, FineTuningTask
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config
from test.generic.utils import compare_model_state
from torch.nn.modules.loss import CrossEntropyLoss
@register_loss("batchnorm_cross_entropy_loss")
class BatchNormCrossEntropyLoss(ClassyLoss):
"""A special loss containing a BatchNorm module"""
def __init__(self, num_classes):
super().__init__()
self.bn = nn.BatchNorm1d(num_classes)
self.fc = nn.Linear(num_classes, num_classes)
self.xent = CrossEntropyLoss()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "BatchNormCrossEntropyLoss":
assert "num_classes" in config
return cls(config["num_classes"])
def forward(self, x, target):
return self.xent(self.fc(self.bn(x)), target)
class TestFineTuningTask(unittest.TestCase):
def _compare_model_state(self, state_1, state_2, check_heads=True):
return compare_model_state(self, state_1, state_2, check_heads=check_heads)
def _compare_state_dict(self, state_1, state_2, check_heads=True):
for k in state_1.keys():
self.assertTrue(torch.allclose(state_1[k].cpu(), state_2[k].cpu()))
def _get_fine_tuning_config(
self, head_num_classes=100, pretrained_checkpoint=False
):
config = get_fast_test_task_config(head_num_classes=head_num_classes)
config["name"] = "fine_tuning"
config["num_epochs"] = 2
if pretrained_checkpoint:
config["pretrained_checkpoint"] = "/path/to/pretrained/checkpoint"
return config
def _get_pre_train_config(self, head_num_classes=100):
config = get_fast_test_task_config(head_num_classes=head_num_classes)
config["num_epochs"] = 2
return config
def test_build_task(self):
config = self._get_fine_tuning_config()
task = build_task(config)
self.assertIsInstance(task, FineTuningTask)
config = self._get_fine_tuning_config(pretrained_checkpoint=True)
with mock.patch("classy_vision.tasks.FineTuningTask.set_pretrained_checkpoint"):
task = build_task(config)
self.assertIsInstance(task, FineTuningTask)
def test_prepare(self):
pre_train_config = self._get_pre_train_config()
pre_train_task = build_task(pre_train_config)
pre_train_task.prepare()
checkpoint = get_checkpoint_dict(pre_train_task, {})
fine_tuning_config = self._get_fine_tuning_config()
fine_tuning_task = build_task(fine_tuning_config)
# test: cannot prepare a fine tuning task without a pre-trained checkpoint
with self.assertRaises(Exception):
fine_tuning_task.prepare()
# test: prepare should succeed after pre-trained checkpoint is set
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task._set_pretrained_checkpoint_dict(checkpoint)
fine_tuning_task.prepare()
# test: prepare should succeed if a pre-trained checkpoint is provided in the
# config
fine_tuning_config = self._get_fine_tuning_config(pretrained_checkpoint=True)
fine_tuning_task = build_task(fine_tuning_config)
with mock.patch(
"classy_vision.tasks.fine_tuning_task.load_and_broadcast_checkpoint",
return_value=checkpoint,
):
fine_tuning_task.prepare()
# test: a fine tuning task with incompatible heads with a manually set
# pre-trained checkpoint should fail to prepare if the heads are not reset
fine_tuning_config = self._get_fine_tuning_config(head_num_classes=10)
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task._set_pretrained_checkpoint_dict(checkpoint)
with self.assertRaises(Exception):
fine_tuning_task.prepare()
# test: a fine tuning task with incompatible heads with a manually set
# pre-trained checkpoint should succeed to prepare if the heads are reset
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task._set_pretrained_checkpoint_dict(
copy.deepcopy(checkpoint)
).set_reset_heads(True)
fine_tuning_task.prepare()
# test: a fine tuning task with incompatible heads with the pre-trained
# checkpoint provided in the config should fail to prepare
fine_tuning_config = self._get_fine_tuning_config(
head_num_classes=10, pretrained_checkpoint=True
)
fine_tuning_task = build_task(fine_tuning_config)
with mock.patch(
"classy_vision.tasks.fine_tuning_task.load_and_broadcast_checkpoint",
return_value=copy.deepcopy(checkpoint),
) and self.assertRaises(Exception):
fine_tuning_task.prepare()
# test: a fine tuning task with incompatible heads with the pre-trained
# checkpoint provided in the config should succeed to prepare if the heads are
# reset
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task.set_reset_heads(True)
with mock.patch(
"classy_vision.tasks.fine_tuning_task.load_and_broadcast_checkpoint",
return_value=copy.deepcopy(checkpoint),
):
fine_tuning_task.prepare()
def test_train(self):
pre_train_config = self._get_pre_train_config(head_num_classes=100)
pre_train_task = build_task(pre_train_config)
trainer = LocalTrainer()
trainer.train(pre_train_task)
checkpoint = get_checkpoint_dict(pre_train_task, {})
for reset_heads, heads_num_classes in [(False, 100), (True, 20)]:
for freeze_trunk in [True, False]:
fine_tuning_config = self._get_fine_tuning_config(
head_num_classes=heads_num_classes
)
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task = (
fine_tuning_task._set_pretrained_checkpoint_dict(
copy.deepcopy(checkpoint)
)
.set_reset_heads(reset_heads)
.set_freeze_trunk(freeze_trunk)
)
# run in test mode to compare the model state
fine_tuning_task.set_test_only(True)
trainer.train(fine_tuning_task)
self._compare_model_state(
pre_train_task.model.get_classy_state(),
fine_tuning_task.model.get_classy_state(),
check_heads=not reset_heads,
)
# run in train mode to check accuracy
fine_tuning_task.set_test_only(False)
trainer.train(fine_tuning_task)
if freeze_trunk:
# if trunk is frozen the states should be the same
self._compare_model_state(
pre_train_task.model.get_classy_state(),
fine_tuning_task.model.get_classy_state(),
check_heads=False,
)
else:
# trunk isn't frozen, the states should be different
with self.assertRaises(Exception):
self._compare_model_state(
pre_train_task.model.get_classy_state(),
fine_tuning_task.model.get_classy_state(),
check_heads=False,
)
accuracy = fine_tuning_task.meters[0].value["top_1"]
self.assertAlmostEqual(accuracy, 1.0)
def test_train_parametric_loss(self):
heads_num_classes = 100
pre_train_config = self._get_pre_train_config(
head_num_classes=heads_num_classes
)
pre_train_config["loss"] = {
"name": "batchnorm_cross_entropy_loss",
"num_classes": heads_num_classes,
}
pre_train_task = build_task(pre_train_config)
trainer = LocalTrainer()
trainer.train(pre_train_task)
checkpoint = get_checkpoint_dict(pre_train_task, {})
fine_tuning_config = self._get_fine_tuning_config(
head_num_classes=heads_num_classes
)
fine_tuning_config["loss"] = {
"name": "batchnorm_cross_entropy_loss",
"num_classes": heads_num_classes,
}
fine_tuning_task = build_task(fine_tuning_config)
fine_tuning_task._set_pretrained_checkpoint_dict(copy.deepcopy(checkpoint))
# run in test mode to compare the loss state. Since we have a BatchNorm module in
# the loss, its moving mean/std should be unchanged when we run in test-only mode
fine_tuning_task.set_test_only(True)
loss_state = copy.deepcopy(fine_tuning_task.loss.get_classy_state())
trainer.train(fine_tuning_task)
self._compare_state_dict(loss_state, fine_tuning_task.loss.get_classy_state())
| ClassyVision-main | test/tasks_fine_tuning_task_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ClassyVision-main | test/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from classy_vision.generic.profiler import (
compute_activations,
compute_flops,
count_params,
get_shape,
)
from classy_vision.models import build_model
from test.generic.config_utils import get_test_model_configs
class TestModule(nn.Module):
def __init__(self):
super().__init__()
# add parameters to the module to affect the parameter count
self.linear = nn.Linear(2, 3, bias=False)
def forward(self, x):
return x + 1
def flops(self, x):
# TODO: this should raise an exception if this function is not defined
# since the FLOPs are indeterminable
# need to define flops since this is an unknown class
return x.numel()
class TestConvModule(nn.Conv2d):
def __init__(self):
super().__init__(2, 3, (4, 4), bias=False)
# add another (unused) layer for added complexity and to test parameters
self.linear = nn.Linear(4, 5, bias=False)
def forward(self, x):
return x
def activations(self, x, out):
# TODO: this should ideally work without this function being defined
return out.numel()
def flops(self, x):
# need to define flops since this is an unknown class
return 0
class TestModuleWithTwoArguments(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x1, x2):
return x1 + x2
def flops(self, x1, x2):
return x1.numel()
class TestModuleDoubleValue(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10, bias=False)
self.add = TestModuleWithTwoArguments()
def forward(self, x):
x = self.linear(x)
return self.add(x, x)
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(300, 300, bias=False)
self.mod = TestModule()
self.conv = TestConvModule()
# we should be able to pick up user defined parameters as well
self.extra_params = nn.Parameter(torch.randn(10, 10))
# we shouldn't count flops for an unused layer
self.unused_linear = nn.Linear(2, 2, bias=False)
def forward(self, x):
out = self.conv(x)
out = out.view(out.shape[0], -1)
out = self.mod(out)
return self.linear(out)
class TestModel2(nn.Module):
def __init__(self):
super().__init__()
# create a model which re-uses a module (conv_module in this case)
conv_module = nn.Conv2d(3, 3, (2, 2), bias=False)
self.seq_1 = nn.Sequential(conv_module)
self.seq_2 = nn.Sequential(conv_module)
def forward(self, x):
return self.seq_1(x) + self.seq_2(x)
class TestModuleWithoutFlops(nn.Module):
# this module doesn't have FLOPs defined
def forward(self, x):
return x
class TestModuleWithFlops(nn.Module):
# this module does have FLOPs defined
_flops = 1234
def __init__(self):
super().__init__()
self.mod = TestModuleWithoutFlops()
# add a conv module; this shouldn't impact the FLOPs since we define
# self.flops()
self.conv = nn.Conv2d(3, 3, (2, 2))
def forward(self, x):
return self.conv(x)
def flops(self, x):
return self._flops
class TestProfilerFunctions(unittest.TestCase):
def test_complexity_calculation_resnext(self) -> None:
model_configs = get_test_model_configs()
# make sure there are three configs returned
self.assertEqual(len(model_configs), 3)
# expected values which allow minor deviations from model changes
# we only test at the 10^6 scale
expected_m_flops = [4122, 7850, 8034]
expected_m_params = [25, 44, 44]
expected_m_activations = [11, 16, 21]
for model_config, m_flops, m_params, m_activations in zip(
model_configs, expected_m_flops, expected_m_params, expected_m_activations
):
model = build_model(model_config)
self.assertEqual(compute_activations(model) // 10**6, m_activations)
self.assertEqual(compute_flops(model) // 10**6, m_flops)
self.assertEqual(count_params(model) // 10**6, m_params)
def test_complexity_calculation(self) -> None:
model = TestModel()
input_shape = (3, 10, 10)
num_elems = 3 * 10 * 10
activations = num_elems + num_elems # linear + conv
self.assertEqual(
compute_activations(model, input_shape=input_shape), activations
)
self.assertEqual(
compute_flops(model, input_shape=input_shape),
num_elems
+ 0
+ (300 * 300), # TestModule + TestConvModule + TestModel.linear;
# TestModel.unused_linear is unused and shouldn't be counted
)
self.assertEqual(
count_params(model),
(2 * 3) + (2 * 3 * 4 * 4) + (4 * 5) + (300 * 300) + (10 * 10) + (2 * 2),
) # TestModule.linear + TestConvModule + TestConvModule.linear +
# TestModel.linear + TestModel.extra_params + TestModel.unused_linear
# test that we calculate complexity correctly for a model which re-uses a module
model = TestModel2()
in_channels = 3
out_channels = 3
out_h, out_w = 9, 9
kernel_h, kernel_w = 2, 2
conv_flops = in_channels * out_channels * out_h * out_w * kernel_h * kernel_w
conv_activations = out_channels * out_h * out_w
self.assertEqual(
compute_activations(model, input_shape=input_shape), conv_activations * 2
) # the conv is applied twice
self.assertEqual(
compute_flops(model, input_shape=input_shape), conv_flops * 2
) # the conv is applied twice
self.assertEqual(
count_params(model), in_channels * out_channels * kernel_h * kernel_w
)
def test_flops_calculation(self):
# test that a model containing a custom module which doesn't have FLOPs defined
# raises an exception
model = nn.Sequential(TestModuleWithoutFlops())
input_shape = (3, 10, 10)
with self.assertRaises(Exception):
compute_flops(model, input_shape=input_shape)
# test that a model containing a custom module does have FLOPs defined works,
# even if the module has children which don't define FLOPs
model = nn.Sequential(TestModuleWithFlops())
input_shape = (3, 10, 10)
self.assertEqual(
compute_flops(model, input_shape=input_shape), TestModuleWithFlops._flops
        )  # flops() defined on the module takes precedence over its children
        # test a model containing a module whose forward takes two positional arguments
model = nn.Sequential(TestModuleDoubleValue())
input_shape = (10,)
self.assertEqual(compute_flops(model, input_shape=input_shape), 110)
class TestHelperFunctions(unittest.TestCase):
def test_get_shape(self) -> None:
list_x = [torch.zeros(2, 4), torch.zeros(3, 3)]
shapes = get_shape(list_x)
expected_shapes = [torch.zeros(2, 4).size(), torch.zeros(3, 3).size()]
for shape, expected in zip(shapes, expected_shapes):
self.assertEqual(shape, expected)
dict_x = {"x1": torch.zeros(2, 4), "x2": torch.zeros(3, 3)}
shapes = get_shape(dict_x)
expected_shapes = {
"x1": torch.zeros(2, 4).size(),
"x2": torch.zeros(3, 3).size(),
}
for k, shape in shapes.items():
self.assertEqual(shape, expected_shapes[k])
list_dict_x = [
{"x1": torch.zeros(2, 4), "x2": torch.zeros(3, 3)},
{"x1": torch.zeros(3, 4), "x2": torch.zeros(4, 5)},
]
shapes = get_shape(list_dict_x)
expected_shapes = [
{"x1": torch.zeros(2, 4).size(), "x2": torch.zeros(3, 3).size()},
{"x1": torch.zeros(3, 4).size(), "x2": torch.zeros(4, 5).size()},
]
for shape, expected in zip(shapes, expected_shapes):
for k, s in shape.items():
self.assertEqual(s, expected[k])
| ClassyVision-main | test/generic_profiler_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
LinearParamScheduler,
)
class TestLinearScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_intermediate(self):
return [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09]
def _get_valid_config(self):
return {"name": "linear", "start_value": 0.0, "end_value": 0.1}
def test_invalid_config(self):
config = self._get_valid_config()
bad_config = copy.deepcopy(config)
# No start lr
del bad_config["start_value"]
with self.assertRaises((AssertionError, TypeError)):
LinearParamScheduler.from_config(bad_config)
# No end lr
bad_config["start_value"] = config["start_value"]
del bad_config["end_value"]
with self.assertRaises((AssertionError, TypeError)):
LinearParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
# Check as warmup
scheduler = LinearParamScheduler.from_config(config)
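        # Assuming the scheduler interpolates start_value + where * (end_value -
        # start_value), sampling at where = 0.0, 0.1, ..., 0.9 yields the warmup
        # schedule 0.0, 0.01, ..., 0.09 checked below.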
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [config["start_value"]] + self._get_valid_intermediate()
self.assertEqual(schedule, expected_schedule)
# Check as decay
tmp = config["start_value"]
config["start_value"] = config["end_value"]
config["end_value"] = tmp
scheduler = LinearParamScheduler.from_config(config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [config["start_value"]] + list(
reversed(self._get_valid_intermediate())
)
self.assertEqual(schedule, expected_schedule)
def test_build_linear_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, LinearParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_linear_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.heads.vision_transformer_head import VisionTransformerHead
class TestVisionTransformerHead(unittest.TestCase):
def test_vision_transformer_head(self):
batch_size = 2
in_plane = 3
num_classes = 5
head = VisionTransformerHead(
"default_head",
num_classes=num_classes,
in_plane=in_plane,
)
input = torch.rand([batch_size, in_plane])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, num_classes]))
def test_vision_transformer_head_normalize_inputs(self):
batch_size = 2
in_plane = 3
head = VisionTransformerHead(
"default_head",
num_classes=None,
in_plane=in_plane,
normalize_inputs="l2",
)
input = torch.rand([batch_size, in_plane])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, in_plane]))
for i in range(batch_size):
output_i = output[i]
self.assertAlmostEqual(output_i.norm().item(), 1, places=3)
| ClassyVision-main | test/heads_vision_transformer_head_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import unittest
import unittest.mock as mock
import classy_vision.dataset.classy_dataset as classy_dataset
import torch
from classy_vision.dataset import build_dataset, register_dataset
from classy_vision.dataset.core import ListDataset
from test.generic.utils import compare_batches, compare_samples
from torch.utils.data import DataLoader
DUMMY_SAMPLES_1 = [
{"input": torch.tensor([[[0, 1], [2, 3]]]), "target": torch.tensor([[0]])}
]
DUMMY_SAMPLES_2 = [
{"input": torch.tensor([[[0, 1], [2, 3]]]), "target": torch.tensor([[0]])},
{"input": torch.tensor([[[4, 5], [6, 7]]]), "target": torch.tensor([[1]])},
]
BATCHED_DUMMY_SAMPLES_2 = [
{
"input": torch.tensor([[[[0, 1], [2, 3]]], [[[4, 5], [6, 7]]]]),
"target": torch.tensor([[[0]], [[1]]]),
}
]
DUMMY_CONFIG = {"name": "test_dataset", "dummy0": 0, "dummy1": 1}
OTHER_DUMMY_CONFIG = {"name": "other_test_dataset", "dummy0": 0, "dummy1": 1}
def mock_get_world_size():
return 2
def mock_get_rank():
return 1
@register_dataset("test_dataset")
class TestDataset(classy_dataset.ClassyDataset):
"""Test dataset for validating registry functions"""
def __init__(
self,
samples,
batchsize_per_replica=1,
num_samples=None,
shuffle=False,
transform=None,
):
input_tensors = [sample["input"] for sample in samples]
target_tensors = [sample["target"] for sample in samples]
dataset = ListDataset(input_tensors, target_tensors, loader=lambda x: x)
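        # wrap the raw tensors in a ListDataset; the identity loader returns each
        # input tensor unchanged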
super().__init__(
dataset=dataset,
batchsize_per_replica=batchsize_per_replica,
shuffle=shuffle,
transform=transform,
num_samples=len(samples) if num_samples is None else num_samples,
)
@classmethod
def from_config(cls, config, *args, **kwargs):
return cls(*args, **kwargs)
@register_dataset("other_test_dataset")
class OtherTestDataset(classy_dataset.ClassyDataset):
"""
Test dataset for validating registry functions that has a different
type than TestDataset
"""
def __init__(self, samples, batchsize_per_replica=1):
input_tensors = [sample["input"] for sample in samples]
target_tensors = [sample["target"] for sample in samples]
dataset = ListDataset(input_tensors, target_tensors, loader=lambda x: x)
super().__init__(
dataset=dataset,
batchsize_per_replica=batchsize_per_replica,
shuffle=False,
transform=None,
num_samples=len(samples),
)
@classmethod
def from_config(cls, config, *args, **kwargs):
return cls(*args, **kwargs)
class TestRegistryFunctions(unittest.TestCase):
"""
Tests functions that use registry
"""
    def test_build_dataset(self):
dataset = build_dataset(DUMMY_CONFIG, DUMMY_SAMPLES_1)
self.assertTrue(isinstance(dataset, TestDataset))
class TestClassyDataset(unittest.TestCase):
"""
Tests member functions of ClassyDataset. Note, NotImplemented
functions are mocked in TestDataset class.
"""
def setUp(self):
self.dataset1 = build_dataset(DUMMY_CONFIG, DUMMY_SAMPLES_1)
self.dataset2 = build_dataset(DUMMY_CONFIG, DUMMY_SAMPLES_2)
def _compare_samples(self, sample1, sample2):
compare_samples(self, sample1, sample2)
def _compare_batches(self, batch1, batch2):
compare_batches(self, batch1, batch2)
def test_init(self):
self.assertTrue(self.dataset1 is not None)
self.assertTrue(self.dataset2 is not None)
def test_len(self):
self.assertEqual(len(self.dataset1), 1)
self.assertEqual(len(self.dataset2), 2)
def test_getitem(self):
sample = self.dataset1[0]
self._compare_samples(sample, DUMMY_SAMPLES_1[0])
for idx in range(len(self.dataset2)):
sample = self.dataset2[idx]
self._compare_samples(sample, DUMMY_SAMPLES_2[idx])
def test_get_iterator(self):
# Verifies that we can retrieve samples with iterators
dl = self.dataset1.iterator(num_workers=0)
assert isinstance(
dl, DataLoader
), "Classy Iterator should return instance of PyTorch Dataloader"
next(iter(dl))
# We should be able to set num_workers to zero while also passing a mp context
dl = self.dataset1.iterator(
num_workers=0, multiprocessing_context=mp.get_context()
)
assert isinstance(
dl, DataLoader
), "Classy Iterator should return instance of PyTorch Dataloader"
next(iter(dl))
dl = self.dataset1.iterator(num_workers=2)
assert isinstance(
dl, DataLoader
), "Classy Iterator should return instance of PyTorch Dataloader"
it = iter(dl)
next(it)
        # Because we use multiprocessing, we delete the iterator to
        # shut down the workers
del it
def test_batch_logic(self):
dataset = TestDataset(DUMMY_SAMPLES_2, batchsize_per_replica=2)
dl = dataset.iterator(num_workers=0)
batch = next(iter(dl))
self.assertEqual(batch["input"].size()[0], 2)
self._compare_batches(batch, BATCHED_DUMMY_SAMPLES_2[0])
@mock.patch(
"classy_vision.dataset.classy_dataset.get_world_size", mock_get_world_size
)
@mock.patch("classy_vision.dataset.classy_dataset.get_rank", mock_get_rank)
def test_shard_logic(self):
# This test uses a world size of 2, rank 1 to verify that the
# second sample is returned by the dataloader
dataset = TestDataset(DUMMY_SAMPLES_2, batchsize_per_replica=1)
dl = dataset.iterator(num_workers=0)
sample = next(iter(dl))
self._compare_batches(sample, DUMMY_SAMPLES_2[1])
def test_num_samples_logic(self):
dataset = TestDataset(DUMMY_SAMPLES_2)
self.assertEqual(len(dataset), 2)
dataset = TestDataset(DUMMY_SAMPLES_2, num_samples=1)
# Verify len returns right value for dataset
self.assertEqual(len(dataset), 1)
# Verify len returns right value for iterator
self.assertEqual(len(dataset.iterator(num_workers=0)), 1)
# Verify iterator returns correct number of samples
it = iter(dataset.iterator(num_workers=0))
num_samples = 0
while True:
try:
next(it)
num_samples += 1
except StopIteration:
break
self.assertEqual(num_samples, 1)
# Check assert for num_samples > length of base dataset
dataset = TestDataset(DUMMY_SAMPLES_2, num_samples=3)
with self.assertRaises(AssertionError):
len(dataset)
def test_shuffle_logic(self):
# Simple samples to test shuffling, just a single value tensor
# so we know how things were shuffled
dummy_samples_10 = [
{"input": torch.tensor([[0]]), "target": torch.tensor([0])},
{"input": torch.tensor([[1]]), "target": torch.tensor([0])},
{"input": torch.tensor([[2]]), "target": torch.tensor([0])},
{"input": torch.tensor([[3]]), "target": torch.tensor([0])},
{"input": torch.tensor([[4]]), "target": torch.tensor([0])},
{"input": torch.tensor([[5]]), "target": torch.tensor([0])},
{"input": torch.tensor([[6]]), "target": torch.tensor([0])},
{"input": torch.tensor([[7]]), "target": torch.tensor([0])},
{"input": torch.tensor([[8]]), "target": torch.tensor([0])},
{"input": torch.tensor([[9]]), "target": torch.tensor([0])},
]
dataset = TestDataset(dummy_samples_10, shuffle=True)
def unpack_tensors(tensor_list):
return [t["input"].item() for t in tensor_list]
# Epoch 0
iterator = dataset.iterator(num_workers=0, current_phase_id=0)
it = iter(iterator)
epoch_0_list = [sample for sample in it]
epoch_0_list = unpack_tensors(epoch_0_list)
# Epoch 1
iterator = dataset.iterator(num_workers=0, current_phase_id=1)
it = iter(iterator)
epoch_1_list = [sample for sample in it]
epoch_1_list = unpack_tensors(epoch_1_list)
# Should be same length, should be shuffled, should be
# different shuffles for each epoch
self.assertEqual(len(epoch_0_list), len(epoch_1_list))
self.assertTrue(epoch_0_list != [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertTrue(epoch_0_list != epoch_1_list)
# Test different shuffle seeds
iterator = dataset.iterator(num_workers=0, current_phase_id=0, shuffle_seed=10)
it = iter(iterator)
epoch_0_seed_10_list = [sample for sample in it]
epoch_0_seed_10_list = unpack_tensors(epoch_0_seed_10_list)
self.assertTrue(epoch_0_seed_10_list != epoch_0_list)
def test_transform_logic(self):
def _return_1_transform(sample):
return 1
dataset = TestDataset(DUMMY_SAMPLES_2, transform=_return_1_transform)
sample = dataset[0]
self.assertEqual(sample, 1)
| ClassyVision-main | test/dataset_classy_dataset_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import get_torch_version
from classy_vision.models import build_model
from test.generic.utils import compare_model_state
MODELS = {
"small_densenet": {
"name": "densenet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"growth_rate": 32,
"expansion": 4,
"final_bn_relu": True,
"small_input": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 60,
"zero_init_bias": True,
}
],
},
"small_densenet_se": {
"name": "densenet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"growth_rate": 32,
"expansion": 4,
"final_bn_relu": True,
"small_input": True,
"use_se": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 60,
"zero_init_bias": True,
}
],
},
}
def _find_block_full_path(model, block_name):
"""Find the full path for a given block name
e.g. block3-1 --> 3.block3-1
"""
for name, _ in model.named_modules():
if name.endswith(block_name):
return name
return None
class TestDensenet(unittest.TestCase):
def _test_model(self, model_config):
"""This test will build Densenet models, run a forward pass and
verify output shape, and then verify that get / set state
works.
        We do this in one test so that we construct the model a minimum
number of times.
"""
model = build_model(model_config)
# Verify forward pass works
input = torch.ones([1, 3, 32, 32])
output = model.forward(input)
self.assertEqual(output.size(), (1, 1000))
# Verify get_set_state
new_model = build_model(model_config)
state = model.get_classy_state()
new_model.set_classy_state(state)
new_state = new_model.get_classy_state()
compare_model_state(self, state, new_state, check_heads=True)
def _test_quantize_model(self, model_config):
if get_torch_version() >= [1, 11]:
import torch.ao.quantization as tq
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
else:
import torch.quantization as tq
from torch.quantization.quantize_fx import convert_fx, prepare_fx
# quantize model
model = build_model(model_config)
model.eval()
input = torch.ones([1, 3, 32, 32])
heads = model.get_heads()
        # since prepare changes the code of ClassyBlock, we need to clear the heads
        # first and reattach them later to avoid caching
model.clear_heads()
prepare_custom_config_dict = {}
head_path_from_blocks = [
_find_block_full_path(model.features, block_name)
for block_name in heads.keys()
]
fqn_to_example_inputs = None
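        # prepare_fx in newer FX graph mode quantization releases requires example
        # inputs for each prepared module, so on torch >= 1.13 we collect them
        # keyed by the module's fully qualified name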
if get_torch_version() >= [1, 13]:
from torch.ao.quantization.utils import get_fqn_to_example_inputs
fqn_to_example_inputs = get_fqn_to_example_inputs(model, (input,))
standalone_example_inputs = (torch.randn(1, 3, 3, 3),)
# we need to keep the modules used in head standalone since
# it will be accessed with path name directly in execution
if get_torch_version() >= [1, 13]:
prepare_custom_config_dict["standalone_module_name"] = [
(
head,
{"": tq.default_qconfig},
fqn_to_example_inputs["features." + head],
{"input_quantized_idxs": [0], "output_quantized_idxs": []},
None,
)
for head in head_path_from_blocks
]
else:
prepare_custom_config_dict["standalone_module_name"] = [
(
head,
{"": tq.default_qconfig},
standalone_example_inputs,
{"input_quantized_idxs": [0], "output_quantized_idxs": []},
None,
)
for head in head_path_from_blocks
]
example_inputs = (torch.randn(1, 3, 3, 3),)
if get_torch_version() >= [1, 13]:
example_inputs = fqn_to_example_inputs["initial_block"]
model.initial_block = prepare_fx(
model.initial_block, {"": tq.default_qconfig}, example_inputs
)
if get_torch_version() >= [1, 13]:
example_inputs = fqn_to_example_inputs["features"]
model.features = prepare_fx(
model.features,
{"": tq.default_qconfig},
example_inputs,
prepare_custom_config_dict,
)
model.set_heads(heads)
# calibration
model(input)
heads = model.get_heads()
model.clear_heads()
model.initial_block = convert_fx(model.initial_block)
model.features = convert_fx(model.features)
model.set_heads(heads)
output = model(input)
self.assertEqual(output.size(), (1, 1000))
def test_small_densenet(self):
self._test_model(MODELS["small_densenet"])
@unittest.skipIf(
get_torch_version() < [1, 13],
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13",
)
def test_quantized_small_densenet(self):
self._test_quantize_model(MODELS["small_densenet"])
| ClassyVision-main | test/models_densenet_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import classy_vision.heads.fully_convolutional_linear_head as fcl
import torch
class TestFullyConvolutionalLinearHead(unittest.TestCase):
def test_fully_convolutional_linear_head(self):
head = fcl.FullyConvolutionalLinearHead(
"default_head",
num_classes=2,
in_plane=3,
pool_size=[1, 3, 3],
activation_func="softmax",
use_dropout=False,
)
input = torch.rand([1, 3, 4, 3, 3])
output = head(input)
self.assertEqual(output.shape, torch.Size([1, 8]))
def test_fully_convolutional_linear_head_eval(self):
head = fcl.FullyConvolutionalLinearHead(
"default_head",
num_classes=2,
in_plane=3,
pool_size=[1, 3, 3],
activation_func="softmax",
use_dropout=False,
).eval()
input = torch.rand([1, 3, 4, 3, 3])
output = head(input)
self.assertEqual(output.shape, torch.Size([1, 2]))
def test_fully_convolutional_linear_head_from_cfg(self):
head_cfg = {
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"activation_func": "softmax",
"pool_size": [1, 3, 3],
"num_classes": 2,
"in_plane": 3,
"use_dropout": False,
}
head = fcl.FullyConvolutionalLinearHead.from_config(head_cfg)
input = torch.rand([1, 3, 4, 3, 3])
output = head(input)
self.assertEqual(output.shape, torch.Size([1, 8]))
def test_fully_convolutional_linear_head_adaptive_pool(self):
head = fcl.FullyConvolutionalLinearHead(
"default_head",
num_classes=2,
in_plane=3,
pool_size=None,
activation_func="softmax",
use_dropout=False,
)
input = torch.rand([1, 3, 4, 3, 3])
output = head(input)
self.assertEqual(output.shape, torch.Size([1, 2]))
def test_fully_convolutional_linear_head_adaptive_pool_from_cfg(self):
head_cfg = {
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"activation_func": "softmax",
"num_classes": 2,
"in_plane": 3,
"use_dropout": False,
}
head = fcl.FullyConvolutionalLinearHead.from_config(head_cfg)
input = torch.rand([1, 3, 4, 3, 3])
output = head(input)
self.assertEqual(output.shape, torch.Size([1, 2]))
| ClassyVision-main | test/heads_fully_convolutional_linear_head_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.rmsprop import RMSProp
from test.generic.optim_test_util import TestOptimizer
class TestRMSPropOptimizer(TestOptimizer, unittest.TestCase):
def _get_config(self):
return {
"name": "rmsprop",
"num_epochs": 90,
"lr": 0.1,
"momentum": 0.9,
"weight_decay": 0.0001,
"alpha": 0.9,
"eps": 1e-8,
"centered": False,
}
def _instance_to_test(self):
return RMSProp
| ClassyVision-main | test/optim_rmsprop_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from classy_vision.heads import ClassyHead
from classy_vision.models import (
ClassyModel,
ClassyModelHeadExecutorWrapper,
ClassyModelWrapper,
)
class TestClassyBlock(unittest.TestCase):
def setUp(self):
self.orig_wrapper_cls = self.DummyTestModel.wrapper_cls
def tearDown(self):
self.DummyTestModel.wrapper_cls = self.orig_wrapper_cls
class DummyTestHead(ClassyHead):
def __init__(self):
super().__init__("head_id")
self.layer = torch.nn.Linear(2, 2)
def forward(self, x):
return self.layer(x)
class DummyTestModel(ClassyModel):
def __init__(self):
super().__init__()
self.dummy_block = torch.nn.Linear(2, 2)
self.dummy_block2 = torch.nn.Linear(2, 2)
def forward(self, x):
out = self.dummy_block(x)
return self.dummy_block2(out)
class DummyTestModelDuplicatedBlockNames(ClassyModel):
def __init__(self):
super().__init__()
self.dummy_block = torch.nn.Linear(2, 2)
self.features = nn.Sequential()
self.features.add_module("dummy_model", torch.nn.Linear(2, 2))
def forward(self, x):
out = self.dummy_block(x)
return self.features.dummy_block(out)
def test_head_execution(self):
# test head outputs without any extra wrapper logic, which is the case with
# no wrappers or the base ClassyModelWrapper class
for wrapper_class in [None, ClassyModelWrapper]:
self.DummyTestModel.wrapper_cls = wrapper_class
model = self.DummyTestModel()
head = self.DummyTestHead()
model.set_heads({"dummy_block2": [head]})
input = torch.randn(1, 2)
output = model(input)
head_output = model.execute_heads()
self.assertTrue(torch.allclose(head(output), head_output["head_id"]))
# test that the head output is returned automatically with the
# ClassyModelHeadExecutorWrapper
self.DummyTestModel.wrapper_cls = ClassyModelHeadExecutorWrapper
model = self.DummyTestModel()
head = self.DummyTestHead()
model.set_heads({"dummy_block2": [head]})
input = torch.randn(1, 2)
output = model(input)
head_output = model.execute_heads()
self.assertTrue(torch.allclose(output, head_output["head_id"]))
def test_duplicated_head_ids(self):
model = self.DummyTestModel()
head1 = self.DummyTestHead()
head2 = self.DummyTestHead()
heads = {"dummy_block": [head1], "dummy_block2": [head2]}
with self.assertRaises(ValueError):
model.set_heads(heads)
head2.unique_id = "head_id2"
model.set_heads(heads)
def test_duplicated_block_names(self):
model = self.DummyTestModelDuplicatedBlockNames()
head = self.DummyTestHead()
heads = {"dummy_block2": [head]}
with self.assertRaises(Exception):
# there are two modules with the name "dummy_block2"
# which is not supported
model.set_heads(heads)
# can still attach to a module with a unique id
heads = {"features": [head]}
model.set_heads(heads)
def test_set_heads(self):
model = self.DummyTestModel()
head = self.DummyTestHead()
self.assertEqual(
len(model.get_heads()), 0, "heads should be empty before set_heads"
)
model.set_heads({"dummy_block2": [head]})
input = torch.randn(1, 2)
model(input)
head_outputs = model.execute_heads()
self.assertEqual(len(head_outputs), 1, "should have output for one head")
# remove all heads
model.set_heads({})
self.assertEqual(len(model.get_heads()), 0, "heads should be empty")
# try a non-existing module
with self.assertRaises(Exception):
model.set_heads({"unknown_block": [head]})
| ClassyVision-main | test/models_classy_block_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
import torchvision.transforms as transforms
from classy_vision.dataset.dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper
from PIL import Image
from torch.utils.data import DataLoader, Dataset
class ZeroImageDataset(Dataset):
def __init__(self, crop_size, num_channels, num_classes, num_samples):
self.crop_size = crop_size
self.num_channels = num_channels
self.num_classes = num_classes
self.num_samples = num_samples
def __iter__(self):
# Spread work as mod(N)
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
return iter(range(self.num_samples))
else:
return iter(
range(worker_info.id, self.num_samples, worker_info.num_workers)
)
def __getitem__(self, index):
input_data = transforms.ToTensor()(
Image.fromarray(
np.zeros(
(self.crop_size, self.crop_size, self.num_channels), dtype=np.uint8
)
)
)
target = np.random.randint(self.num_classes)
return {"input": input_data, "target": target, "id": index}
def __len__(self):
return self.num_samples
class TestDataloaderAsyncGPUWrapper(unittest.TestCase):
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_dataset_async(self):
"""
        Test that the async GPU wrapper serves every sample on the GPU, keeps the
        data intact, and returns the expected number of samples.
"""
NUM_SAMPLES = 1024
dataset = ZeroImageDataset(
crop_size=224, num_channels=3, num_classes=1000, num_samples=NUM_SAMPLES
)
base_dataloader = DataLoader(dataset=dataset, pin_memory=True, num_workers=20)
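        # wrap the base dataloader so batches are copied to the GPU ahead of time;
        # every sample yielded below should already live on a CUDA device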
dataloader = DataloaderAsyncGPUWrapper(base_dataloader)
# Test wrap correctness
i = 0
for sample in dataloader:
# test that the data being served is all zeros
self.assertTrue(sample["input"].nonzero(as_tuple=False).numel() == 0)
# test that it's all cuda tensors
for k in sample.keys():
self.assertTrue(sample[k].device.type == "cuda")
# check that consecutive samples are independent objects in memory
sample["input"].fill_(3.14)
# check that the expected number of samples is served
i += 1
self.assertEqual(i, NUM_SAMPLES)
| ClassyVision-main | test/dataloader_async_gpu_wrapper_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Various test loaders."""
import os
import random
import unittest
from itertools import chain
def _circleci_parallelism(suite):
"""Allow for parallelism in CircleCI for speedier tests.."""
if int(os.environ.get("CIRCLE_NODE_TOTAL", 0)) <= 1:
# either not running on circleci, or we're not using parallelism.
return suite
# tests are automatically sorted by discover, so we will get the same ordering
# on all hosts.
total = int(os.environ["CIRCLE_NODE_TOTAL"])
index = int(os.environ["CIRCLE_NODE_INDEX"])
    # right now each test corresponds to a /file/. Certain files are slower than
# others, so we want to flatten it
tests = [testfile._tests for testfile in suite._tests]
tests = list(chain.from_iterable(tests))
random.Random(42).shuffle(tests)
tests = [t for i, t in enumerate(tests) if i % total == index]
return unittest.TestSuite(tests)
def unittests():
"""
Short tests.
Runs on CircleCI on every commit. Returns everything in the tests root directory.
"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover("test", pattern="*_test.py")
test_suite = _circleci_parallelism(test_suite)
return test_suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(unittests())
| ClassyVision-main | test/suites.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
StepWithFixedGammaParamScheduler,
)
class TestStepWithFixedGammaScheduler(unittest.TestCase):
_num_epochs = 12
def _get_valid_config(self):
return {
"name": "step_with_fixed_gamma",
"base_value": 1,
"gamma": 0.1,
"num_decays": 3,
"num_epochs": self._num_epochs,
}
def test_invalid_config(self):
config = self._get_valid_config()
# Invalid num epochs
bad_config = copy.deepcopy(config)
bad_config["num_epochs"] = -1
with self.assertRaises(ValueError):
StepWithFixedGammaParamScheduler.from_config(bad_config)
        # Invalid num_decays
        bad_config = copy.deepcopy(config)
        bad_config["num_decays"] = 0
with self.assertRaises(ValueError):
StepWithFixedGammaParamScheduler.from_config(bad_config)
# Invalid base_value
bad_config = copy.deepcopy(config)
bad_config["base_value"] = -0.01
with self.assertRaises(ValueError):
StepWithFixedGammaParamScheduler.from_config(bad_config)
# Invalid gamma
bad_config = copy.deepcopy(config)
bad_config["gamma"] = [2]
with self.assertRaises(ValueError):
StepWithFixedGammaParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
scheduler = StepWithFixedGammaParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
1,
1,
1,
0.1,
0.1,
0.1,
0.01,
0.01,
0.01,
0.001,
0.001,
0.001,
]
for param, expected_param in zip(schedule, expected_schedule):
self.assertAlmostEqual(param, expected_param)
def test_build_step_with_fixed_gamma_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, StepWithFixedGammaParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_step_with_fixed_gamma_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
import torch
from classy_vision.dataset import build_dataset, ClassyDataset
from classy_vision.dataset.image_path_dataset import ImagePathDataset
from torchvision import transforms
def _sort_key(x):
# sorts x which could correspond to either (sample["input"], sample["target"]),
# or sample["input"]
if isinstance(x, tuple):
return x[0].tolist() + x[1].tolist()
else:
return x.tolist()
class TestImageDataset(unittest.TestCase):
def get_test_image_dataset(self):
config = {
"name": "synthetic_image",
"crop_size": 224,
"num_channels": 3,
"seed": 0,
"class_ratio": 0.5,
"num_samples": 100,
"batchsize_per_replica": 1,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [{"name": "ToTensor"}],
"key": "input",
}
],
}
dataset = build_dataset(config)
return dataset
def setUp(self):
# create a base directory to write image files to
self.base_dir = tempfile.mkdtemp()
# create a dir to store images in the torchvision.ImageFolder format
self.torchvision_dir = f"{self.base_dir}/tv"
os.mkdir(self.torchvision_dir)
os.mkdir(f"{self.torchvision_dir}/0")
os.mkdir(f"{self.torchvision_dir}/1")
# create a dir to store images in the other format
self.other_dir = f"{self.base_dir}/other"
os.mkdir(self.other_dir)
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def get_dataset_config(self):
return {
"name": "image_path",
"batchsize_per_replica": 1,
"use_shuffle": False,
"num_samples": None,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [{"name": "ToTensor"}],
"key": "input",
}
],
}
def test_build_dataset(self):
config = self.get_dataset_config()
config["image_files"] = ["abc"]
dataset = build_dataset(config)
self.assertIsInstance(dataset, ClassyDataset)
# test invalid configs
# cannot pass both image_files and image_folder
config["image_folder"] = self.torchvision_dir
with self.assertRaises(Exception):
dataset = build_dataset(config)
# cannot skip both image_files and image_folder
config.pop("image_files")
config.pop("image_folder")
with self.assertRaises(Exception):
dataset = build_dataset(config)
def test_image_dataset(self):
image_files = []
inputs = []
targets = {}
dataloader = self.get_test_image_dataset().iterator()
for i, sample in enumerate(dataloader):
input = sample["input"]
target = sample["target"]
image = transforms.ToPILImage()(input.squeeze())
path = f"{self.torchvision_dir}/{target.item()}/{i}.png"
image_files.append(path)
image.save(path)
path = f"{self.other_dir}/{i}.png"
image.save(path)
inputs.append(input)
targets[input] = target
config = self.get_dataset_config()
config["image_files"] = image_files
# test the dataset using image_files
dataset = ImagePathDataset.from_config(config)
dataloader = dataset.iterator()
# the samples should be in the same order
for sample, expected_input in zip(dataloader, inputs):
self.assertTrue(torch.allclose(sample["input"], expected_input))
config.pop("image_files")
# test the dataset with image_folder of type torchvision.ImageFolder
config["image_folder"] = self.torchvision_dir
dataset = ImagePathDataset.from_config(config)
dataloader = dataset.iterator()
# the order doesn't matter, so we sort the results
# note that this test assumes that the target for directory 0 will be 0
for (input, target), (expected_input, expected_target) in zip(
sorted(
((sample["input"], sample["target"]) for sample in dataloader),
key=_sort_key,
),
sorted(targets.items(), key=_sort_key),
):
self.assertTrue(torch.allclose(input, expected_input))
self.assertEqual(target, expected_target)
# test the dataset with image_folder of the second type
config["image_folder"] = self.other_dir
dataset = ImagePathDataset.from_config(config)
dataloader = dataset.iterator()
# the order doesn't matter, so we sort the results
for input, expected_input in zip(
sorted((sample["input"] for sample in dataloader), key=_sort_key),
sorted(inputs, key=_sort_key),
):
self.assertTrue(torch.allclose(input, expected_input))
| ClassyVision-main | test/dataset_image_path_dataset_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.models import build_model, ClassyModel
from test.generic.utils import compare_model_state
class TestResNeXt3D(unittest.TestCase):
def setUp(self):
model_config_template = {
"name": "resnext3d",
"input_key": "video",
"clip_crop_size": 112,
"skip_transformation_type": "postactivated_shortcut",
"frames_per_clip": 32,
"input_planes": 3,
"stem_name": "resnext3d_stem",
"stem_planes": 64,
"stem_temporal_kernel": 3,
"stage_planes": 64,
"num_groups": 1,
"width_per_group": 16,
"heads": [
{
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"in_plane": 512,
"pool_size": (2, 7, 7),
"activation_func": "softmax",
"num_classes": 2,
}
],
}
pbt = "postactivated_bottleneck_transformation"
model_config_variants = [
# ResNeXt3D-34
{
"residual_transformation_type": "basic_transformation",
"num_blocks": [3, 4, 6, 3],
},
# ResNeXt3D-50
{"residual_transformation_type": pbt, "num_blocks": [3, 4, 6, 3]},
# ResNeXt3D-101
{"residual_transformation_type": pbt, "num_blocks": [3, 4, 23, 3]},
]
self.model_configs = []
for variant in model_config_variants:
model_config = copy.deepcopy(model_config_template)
model_config.update(variant)
block_idx = model_config["num_blocks"][-1]
# attach the head at the last block
model_config["heads"][0]["fork_block"] = "pathway0-stage4-block%d" % (
block_idx - 1
)
self.model_configs.append(model_config)
self.batchsize = 1
self.forward_pass_configs = {
"train": {
# input shape: N x C x T x H x W
"input": {"video": torch.rand(self.batchsize, 3, 16, 112, 112)},
"model": {
"stem_maxpool": False,
"stage_temporal_stride": [1, 2, 2, 2],
"stage_spatial_stride": [1, 2, 2, 2],
},
},
"test": {
"input": {"video": torch.rand(self.batchsize, 3, 16, 256, 320)},
"model": {
"stem_maxpool": True,
"stage_temporal_stride": [1, 2, 2, 2],
"stage_spatial_stride": [1, 2, 2, 2],
},
},
}
def test_build_model(self):
for model_config in self.model_configs:
model = build_model(model_config)
self.assertTrue(isinstance(model, ClassyModel))
def test_forward_pass(self):
for split, split_config in self.forward_pass_configs.items():
for model_config in self.model_configs:
forward_pass_model_config = copy.deepcopy(model_config)
forward_pass_model_config.update(split_config["model"])
num_classes = forward_pass_model_config["heads"][0]["num_classes"]
model = build_model(forward_pass_model_config)
model.train(split == "train")
out = model(split_config["input"])
self.assertEqual(out.size(), (self.batchsize, num_classes))
def test_set_classy_state_plain(self):
# We use the same model architecture to save and load a model state.
# This is a plain use case of `set_classy_state` method
for model_config in self.model_configs:
model = build_model(model_config)
model_state = model.get_classy_state()
model2 = build_model(model_config)
model2.set_classy_state(model_state)
model2_state = model2.get_classy_state()
compare_model_state(self, model_state, model2_state)
def _get_model_config_weight_inflation(self):
model_2d_config = {
"name": "resnext3d",
"frames_per_clip": 1,
"input_planes": 3,
"clip_crop_size": 224,
"skip_transformation_type": "postactivated_shortcut",
"residual_transformation_type": "postactivated_bottleneck_transformation",
"num_blocks": [3, 4, 6, 3],
"stem_name": "resnext3d_stem",
"stem_planes": 64,
"stem_temporal_kernel": 1,
"stem_spatial_kernel": 7,
"stem_maxpool": True,
"stage_planes": 256,
"stage_temporal_kernel_basis": [[1], [1], [1], [1]],
"temporal_conv_1x1": [True, True, True, True],
"stage_temporal_stride": [1, 1, 1, 1],
"stage_spatial_stride": [1, 2, 2, 2],
"num_groups": 1,
"width_per_group": 64,
"num_classes": 1000,
"zero_init_residual_transform": True,
"heads": [
{
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"pool_size": [1, 7, 7],
"activation_func": "softmax",
"num_classes": 1000,
"fork_block": "pathway0-stage4-block2",
"in_plane": 2048,
"use_dropout": False,
}
],
}
model_3d_config = {
"name": "resnext3d",
"frames_per_clip": 8,
"input_planes": 3,
"clip_crop_size": 224,
"skip_transformation_type": "postactivated_shortcut",
"residual_transformation_type": "postactivated_bottleneck_transformation",
"num_blocks": [3, 4, 6, 3],
"input_key": "video",
"stem_name": "resnext3d_stem",
"stem_planes": 64,
"stem_temporal_kernel": 5,
"stem_spatial_kernel": 7,
"stem_maxpool": True,
"stage_planes": 256,
"stage_temporal_kernel_basis": [[3], [3, 1], [3, 1], [1, 3]],
"temporal_conv_1x1": [True, True, True, True],
"stage_temporal_stride": [1, 1, 1, 1],
"stage_spatial_stride": [1, 2, 2, 2],
"num_groups": 1,
"width_per_group": 64,
"num_classes": 1000,
"freeze_trunk": False,
"zero_init_residual_transform": True,
"heads": [
{
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"pool_size": [8, 7, 7],
"activation_func": "softmax",
"num_classes": 1000,
"fork_block": "pathway0-stage4-block2",
"in_plane": 2048,
"use_dropout": True,
}
],
}
return model_2d_config, model_3d_config
def test_set_classy_state_weight_inflation(self):
# Get model state from a 2D ResNet model, inflate the 2D conv weights,
# and use them to initialize 3D conv weights. This is an advanced use of
# `set_classy_state` method.
model_2d_config, model_3d_config = self._get_model_config_weight_inflation()
model_2d = build_model(model_2d_config)
model_2d_state = model_2d.get_classy_state()
model_3d = build_model(model_3d_config)
model_3d.set_classy_state(model_2d_state)
model_3d_state = model_3d.get_classy_state()
for name, weight_2d in model_2d_state["model"]["trunk"].items():
weight_3d = model_3d_state["model"]["trunk"][name]
if weight_2d.dim() == 5:
# inflation only applies to conv weights
self.assertEqual(weight_3d.dim(), 5)
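                # inflation repeats a 2D kernel (temporal size 1) along the temporal
                # axis and divides by the temporal size, so the inflated filter gives
                # the same response as the 2D filter on a temporally constant input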
if weight_2d.shape[2] == 1 and weight_3d.shape[2] > 1:
weight_2d_inflated = (
weight_2d.repeat(1, 1, weight_3d.shape[2], 1, 1)
/ weight_3d.shape[2]
)
self.assertTrue(torch.equal(weight_3d, weight_2d_inflated))
def test_set_classy_state_weight_inflation_inconsistent_kernel_size(self):
        # Get model state from a 2D ResNet model and try to inflate the 2D conv
        # weights into a 3D model with a mismatched kernel size; this should fail.
model_2d_config, model_3d_config = self._get_model_config_weight_inflation()
# Modify conv kernel size in the stem layer of 2D model to 5, which is
# inconsistent with the kernel size 7 used in 3D model.
model_2d_config["stem_spatial_kernel"] = 5
model_2d = build_model(model_2d_config)
model_2d_state = model_2d.get_classy_state()
model_3d = build_model(model_3d_config)
with self.assertRaises(AssertionError):
model_3d.set_classy_state(model_2d_state)
| ClassyVision-main | test/models_resnext3d_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import subprocess
import tempfile
import unittest
from pathlib import Path
import classy_vision.optim # NOQA
import torch
import torch.distributed as dist
from classy_vision.optim.zero import ZeRO
from test.generic.config_utils import (
get_distributed_launch_cmd,
get_fast_test_task_config,
)
from test.generic.optim_test_util import TestOptimizer
def dist_init(rank, world_size, filename):
dist.init_process_group(
init_method="file://" + filename,
backend=dist.Backend.GLOO,
rank=rank,
world_size=world_size,
)
class TestOptimizerStateShardingIntegration(unittest.TestCase, TestOptimizer):
@staticmethod
def _maybe_destroy_dist():
if dist.is_initialized():
logging.debug("Destroy previous torch dist process group")
dist.destroy_process_group()
def setUp(self):
self._maybe_destroy_dist()
self.filename = tempfile.NamedTemporaryFile(delete=True).name
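        # a single-rank Gloo group backed by a temporary file store is enough for
        # the ZeRO optimizer tests to run without GPUs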
dist_init(0, 1, self.filename)
def tearDown(self):
self._maybe_destroy_dist()
def _get_config(self):
return {"name": "zero", "base_optimizer": {"name": "sgd"}, "num_epochs": 3}
def _instance_to_test(self):
return ZeRO
class TestOptimizerStateSharding(unittest.TestCase):
def setUp(self):
self.path = Path(__file__).parent.absolute()
# Save the task config file on disk
config = self._get_task_config()
with tempfile.NamedTemporaryFile(mode="w", delete=False) as file_io:
json.dump(config, file_io)
file_io.flush()
self.config_path = file_io.name
def tearDown(self):
if self.config_path is not None:
os.unlink(self.config_path)
def _get_task_config(self):
config = get_fast_test_task_config()
config["optimizer"] = {
"name": "zero",
"base_optimizer": {"name": "sgd", "momentum": 0.9},
}
return config
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_oss_sgd(self):
"""
Test that the optimizer is correctly instantiated and that a task can run
"""
num_processes = 2
cmd = get_distributed_launch_cmd(
num_processes=num_processes,
trainer_path=f"{Path(__file__).parent.absolute()}/../classy_train.py",
config_path=self.config_path,
)
result = subprocess.run(cmd, shell=True)
self.assertEqual(result.returncode, 0)
| ClassyVision-main | test/optim_sharded_sgd_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torchvision.transforms as transforms
from classy_vision.dataset.transforms import (
build_transforms,
ClassyTransform,
register_transform,
)
from classy_vision.dataset.transforms.util import ImagenetNoAugmentTransform
@register_transform("resize")
class resize(ClassyTransform):
def __init__(self, size: int):
self.transform = transforms.Resize(size=size)
def __call__(self, img):
return self.transform(img)
@register_transform("center_crop")
class center_crop(ClassyTransform):
def __init__(self, size: int):
self.transform = transforms.CenterCrop(size=size)
def __call__(self, img):
return self.transform(img)
class DatasetTransformsTest(unittest.TestCase):
def get_test_image(self):
return transforms.ToPILImage()(torch.randn((3, 224, 224)))
def test_transforms(self):
input = self.get_test_image()
# reference transform which we will use to validate the built transforms
reference_transform = ImagenetNoAugmentTransform()
reference_output = reference_transform(input)
# test a registered transform
config = [{"name": "imagenet_no_augment"}]
transform = build_transforms(config)
output = transform(input)
self.assertTrue(torch.allclose(output, reference_output))
# test a transform built using torchvision transforms
config = [
{"name": "Resize", "size": 256},
{"name": "CenterCrop", "size": 224},
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
]
transform = build_transforms(config)
output = transform(input)
self.assertTrue(torch.allclose(output, reference_output))
# test a combination of registered and torchvision transforms
config = [
{"name": "resize", "size": 256},
{"name": "center_crop", "size": 224},
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
]
transform = build_transforms(config)
output = transform(input)
self.assertTrue(torch.allclose(output, reference_output))
| ClassyVision-main | test/dataset_transforms_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
CosineParamScheduler,
)
class TestCosineScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_decay_config(self):
return {"name": "cosine", "start_value": 0.1, "end_value": 0}
def _get_valid_decay_config_intermediate_values(self):
return [0.0976, 0.0905, 0.0794, 0.0655, 0.05, 0.0345, 0.0206, 0.0095, 0.0024]
def test_invalid_config(self):
        config = self._get_valid_decay_config()
        bad_config = copy.deepcopy(config)
        # No start_value
        del bad_config["start_value"]
with self.assertRaises((AssertionError, TypeError)):
CosineParamScheduler.from_config(bad_config)
        # No end_value
bad_config["start_value"] = config["start_value"]
del bad_config["end_value"]
with self.assertRaises((AssertionError, TypeError)):
CosineParamScheduler.from_config(bad_config)
def test_scheduler_as_decay(self):
config = self._get_valid_decay_config()
scheduler = CosineParamScheduler.from_config(config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
config["start_value"]
] + self._get_valid_decay_config_intermediate_values()
self.assertEqual(schedule, expected_schedule)
def test_scheduler_as_warmup(self):
config = self._get_valid_decay_config()
# Swap start and end lr to change to warmup
tmp = config["start_value"]
config["start_value"] = config["end_value"]
config["end_value"] = tmp
scheduler = CosineParamScheduler.from_config(config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
# Schedule should be decay reversed
expected_schedule = [config["start_value"]] + list(
reversed(self._get_valid_decay_config_intermediate_values())
)
self.assertEqual(schedule, expected_schedule)
def test_scheduler_warmup_decay_match(self):
decay_config = self._get_valid_decay_config()
decay_scheduler = CosineParamScheduler.from_config(decay_config)
warmup_config = copy.deepcopy(decay_config)
# Swap start and end lr to change to warmup
tmp = warmup_config["start_value"]
warmup_config["start_value"] = warmup_config["end_value"]
warmup_config["end_value"] = tmp
warmup_scheduler = CosineParamScheduler.from_config(warmup_config)
decay_schedule = [
round(decay_scheduler(epoch_num / 1000), 8) for epoch_num in range(1, 1000)
]
warmup_schedule = [
round(warmup_scheduler(epoch_num / 1000), 8) for epoch_num in range(1, 1000)
]
self.assertEqual(decay_schedule, list(reversed(warmup_schedule)))
def test_build_cosine_scheduler(self):
config = self._get_valid_decay_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, CosineParamScheduler))
| ClassyVision-main | test/optim_param_scheduler_cosine_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
)
from classy_vision.dataset.transforms.autoaugment import ImagenetAutoAugment # noqa
from classy_vision.dataset.transforms.util import build_field_transform_default_imagenet
class AutoaugmentTransformTest(unittest.TestCase):
def get_test_image_dataset(self):
return RandomImageBinaryClassDataset(
crop_size=224, class_ratio=0.5, num_samples=100, seed=0
)
def test_imagenet_autoaugment_transform_no_errors(self):
"""
Tests that the imagenet autoaugment transform runs without any errors.
"""
dataset = self.get_test_image_dataset()
config = [{"name": "imagenet_autoaugment"}]
transform = build_field_transform_default_imagenet(config)
sample = dataset[0]
# test that imagenet autoaugment has been registered and runs without errors
transform(sample)
| ClassyVision-main | test/dataset_transforms_autoaugment_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import shutil
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
import classy_vision.generic.util as util
import torch
import torch.nn as nn
from classy_vision.generic.util import (
CHECKPOINT_FILE,
get_torch_version,
load_checkpoint,
save_checkpoint,
split_batchnorm_params,
Timer,
update_classy_model,
update_classy_state,
)
from classy_vision.models import build_model
from classy_vision.tasks import build_task
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config, get_test_model_configs
from test.generic.utils import compare_model_state, compare_states
ROOT = Path(__file__).parent
@mock.patch("torch.tensor")
def get_mock_tensor(mock_class):
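    # torch.tensor is patched here, so the value returned below is a mock whose
    # .cuda() call yields a tensor-like object with is_cuda forced to True; this
    # lets the GPU copy utilities be tested on machines without CUDA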
def get_cuda_tensor():
t = torch.tensor([1, 2, 3])
t.is_cuda = True
return t
mock_class.return_value.cuda.return_value = get_cuda_tensor()
mock_class.is_cuda = False
return torch.tensor([1, 2, 3])
class TestUtilMethods(unittest.TestCase):
class StructuredInput:
def __init__(self, tensor_a, tensor_b):
self.tensor_a = tensor_a
self.tensor_b = tensor_b
def to(self, device, non_blocking):
return TestUtilMethods.StructuredInput(
tensor_a=self.tensor_a.to(device=device, non_blocking=non_blocking),
tensor_b=self.tensor_b.to(device=device, non_blocking=non_blocking),
)
def test_recursive_copy_to_gpu(self):
tensor_a = get_mock_tensor()
tensor_b = get_mock_tensor()
tensor_structured = TestUtilMethods.StructuredInput(
tensor_a=get_mock_tensor(), tensor_b=get_mock_tensor()
)
valid_gpu_copy_value = tensor_a
gpu_value = util.recursive_copy_to_gpu(valid_gpu_copy_value)
self.assertTrue(gpu_value.is_cuda)
valid_recursive_copy_value = [[tensor_a]]
gpu_value = util.recursive_copy_to_gpu(valid_recursive_copy_value)
self.assertTrue(gpu_value[0][0].is_cuda)
valid_gpu_copy_collections = [
(tensor_a, tensor_b),
[tensor_a, tensor_b],
{"tensor_a": tensor_a, "tensor_b": tensor_b},
]
for value in valid_gpu_copy_collections:
gpu_value = util.recursive_copy_to_gpu(value)
if isinstance(value, dict):
self.assertTrue(gpu_value["tensor_a"].is_cuda)
self.assertTrue(gpu_value["tensor_b"].is_cuda)
else:
self.assertEqual(len(gpu_value), 2)
self.assertTrue(gpu_value[0].is_cuda)
self.assertTrue(gpu_value[1].is_cuda)
value = {"a": "b"}
self.assertEqual(value, util.recursive_copy_to_gpu(value))
valid_gpu_copy_structured = tensor_structured
gpu_value = util.recursive_copy_to_gpu(valid_gpu_copy_structured)
self.assertTrue(gpu_value.tensor_a.is_cuda)
self.assertTrue(gpu_value.tensor_b.is_cuda)
_json_config_file = ROOT / "generic_util_json_blob_test.json"
def _get_config(self):
return {
"name": "test_task",
"num_epochs": 12,
"loss": {"name": "test_loss"},
"dataset": {
"name": "test_data",
"batchsize_per_replica": 8,
"use_pairs": False,
"num_samples": None,
"use_shuffle": {"train": True, "test": False},
},
"meters": [{"name": "test_meter", "test_param": 0.1}],
"model": {"name": "test_model", "architecture": [1, 2, 3, 4]},
"optimizer": {
"name": "test_optimizer",
"test_param": {
"name": "test_scheduler",
"values": [0.1, 0.01, 0.001, 0.0001],
},
},
}
def test_load_config(self):
expected_config = self._get_config()
config = util.load_json(self._json_config_file)
self.assertEqual(config, expected_config)
def test_torch_seed(self):
# test that using util.torch_seed doesn't impact the generation of
# random numbers outside its context and that random numbers generated
# within its context are the same as setting a manual seed
torch.manual_seed(0)
torch.randn(10)
random_tensor_1 = torch.randn(10)
torch.manual_seed(0)
torch.randn(10)
with util.torch_seed(1):
random_tensor_2 = torch.randn(10)
self.assertTrue(torch.equal(torch.randn(10), random_tensor_1))
torch.manual_seed(1)
self.assertTrue(torch.equal(torch.randn(10), random_tensor_2))
def test_get_model_dummy_input(self):
for config in get_test_model_configs():
model = build_model(config) # pass in a dummy model for the cuda check
batchsize = 8
# input_key is list
input_key = ["audio", "video"]
input_shape = [[3, 40, 100], [4, 16, 223, 223]] # dummy input shapes
result = util.get_model_dummy_input(
model, input_shape, input_key, batchsize
)
self.assertEqual(result.keys(), {"audio", "video"})
for i in range(len(input_key)):
self.assertEqual(
result[input_key[i]].size(), tuple([batchsize] + input_shape[i])
)
# input_key is string
input_key = "video"
input_shape = [4, 16, 223, 223]
result = util.get_model_dummy_input(
model, input_shape, input_key, batchsize
)
self.assertEqual(result.keys(), {"video"})
self.assertEqual(result[input_key].size(), tuple([batchsize] + input_shape))
# input_key is None
input_key = None
input_shape = [4, 16, 223, 223]
result = util.get_model_dummy_input(
model, input_shape, input_key, batchsize
)
self.assertEqual(result.size(), tuple([batchsize] + input_shape))
def _compare_model_train_mode(self, model_1, model_2):
for name_1, module_1 in model_1.named_modules():
found = False
for name_2, module_2 in model_2.named_modules():
if name_1 == name_2:
found = True
if module_1.training != module_2.training:
return False
if not found:
return False
return True
def _check_model_train_mode(self, model, expected_mode):
for module in model.modules():
if module.training != expected_mode:
return False
return True
def test_split_batchnorm_params(self):
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(2, 3, bias=False)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(3)
def forward(self, x):
return self.bn(self.relu(self.lin(x)))
torch.manual_seed(1)
model = MyModel()
bn_params, lin_params = split_batchnorm_params(model)
        self.assertEqual(len(bn_params), 2)
        self.assertEqual(len(lin_params), 1)
self.assertTrue(torch.allclose(bn_params[0], model.bn.weight))
self.assertTrue(torch.allclose(bn_params[1], model.bn.bias))
self.assertTrue(torch.allclose(lin_params[0], model.lin.weight))
def test_train_model_eval_model(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 2)
self.dropout = nn.Dropout()
self.seq = nn.Sequential(
nn.ReLU(), nn.Conv2d(1, 2, 3), nn.BatchNorm2d(1, 2)
)
test_model = TestModel()
for train in [True, False]:
test_model.train(train)
# flip some of the modes
test_model.dropout.train(not train)
test_model.seq[1].train(not train)
orig_model = copy.deepcopy(test_model)
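            # train_model should switch every submodule to train mode inside the
            # context and restore the original (mixed) modes on exit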
with util.train_model(test_model):
self._check_model_train_mode(test_model, True)
# the modes should be different inside the context manager
self.assertFalse(self._compare_model_train_mode(orig_model, test_model))
self.assertTrue(self._compare_model_train_mode(orig_model, test_model))
with util.eval_model(test_model):
self._check_model_train_mode(test_model, False)
# the modes should be different inside the context manager
self.assertFalse(self._compare_model_train_mode(orig_model, test_model))
self.assertTrue(self._compare_model_train_mode(orig_model, test_model))
@mock.patch("time.perf_counter")
def test_timer(self, mock_perf_counter: mock.MagicMock):
def test_func(a, b=2):
return a + b
start_time = 10
end_time = 12
mock_perf_counter.side_effect = [start_time, end_time]
with Timer() as timer:
test_func(1, b=3)
self.assertAlmostEqual(timer.elapsed_time, end_time - start_time)
class TestUpdateStateFunctions(unittest.TestCase):
def _compare_states(self, state_1, state_2, check_heads=True):
compare_states(self, state_1, state_2)
def _compare_model_state(self, state_1, state_2, check_heads=True):
return compare_model_state(self, state_1, state_2, check_heads=check_heads)
def test_update_classy_state(self):
"""
Tests that the update_classy_state successfully updates from a
checkpoint
"""
config = get_fast_test_task_config()
task = build_task(config)
task_2 = build_task(config)
task_2.prepare()
trainer = LocalTrainer()
trainer.train(task)
update_classy_state(task_2, task.get_classy_state(deep_copy=True))
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
def test_update_classy_model(self):
"""
Tests that the update_classy_model successfully updates from a
checkpoint
"""
config = get_fast_test_task_config()
task = build_task(config)
trainer = LocalTrainer()
trainer.train(task)
for reset_heads in [False, True]:
task_2 = build_task(config)
# prepare task_2 for the right device
task_2.prepare()
update_classy_model(
task_2.model, task.model.get_classy_state(deep_copy=True), reset_heads
)
self._compare_model_state(
task.model.get_classy_state(),
task_2.model.get_classy_state(),
check_heads=not reset_heads,
)
if reset_heads:
# the model head states should be different
with self.assertRaises(Exception):
self._compare_model_state(
task.model.get_classy_state(),
task_2.model.get_classy_state(),
check_heads=True,
)
class TestCheckpointFunctions(unittest.TestCase):
def setUp(self):
# create a base directory to write checkpoints to
self.base_dir = tempfile.mkdtemp()
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def test_save_and_load_checkpoint(self):
checkpoint_dict = {str(i): i * 2 for i in range(1000)}
# save to the default checkpoint file
save_checkpoint(self.base_dir, checkpoint_dict)
# load the checkpoint by using the default file
loaded_checkpoint = load_checkpoint(self.base_dir)
self.assertDictEqual(checkpoint_dict, loaded_checkpoint)
# load the checkpoint by passing the full path
checkpoint_path = f"{self.base_dir}/{CHECKPOINT_FILE}"
loaded_checkpoint = load_checkpoint(checkpoint_path)
self.assertDictEqual(checkpoint_dict, loaded_checkpoint)
# create a new checkpoint dict
filename = "my_checkpoint.torch"
checkpoint_dict = {str(i): i * 3 for i in range(1000)}
# save the checkpoint to a different file
save_checkpoint(self.base_dir, checkpoint_dict, checkpoint_file=filename)
# load the checkpoint by passing the full path
checkpoint_path = f"{self.base_dir}/{filename}"
loaded_checkpoint = load_checkpoint(checkpoint_path)
self.assertDictEqual(checkpoint_dict, loaded_checkpoint)
@mock.patch("classy_vision.generic.util.torch")
def test_get_torch_version(self, mock_torch: mock.MagicMock):
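        # get_torch_version keeps only the major and minor components, so patch
        # numbers and suffixes are ignored and versions compare as lists of ints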
mock_torch.__version__ = "1.7.2"
self.assertEqual(get_torch_version(), [1, 7])
self.assertLess(get_torch_version(), [1, 8])
self.assertGreater(get_torch_version(), [1, 6])
mock_torch.__version__ = "1.11.2a"
self.assertEqual(get_torch_version(), [1, 11])
self.assertLess(get_torch_version(), [1, 13])
self.assertGreater(get_torch_version(), [1, 8])
| ClassyVision-main | test/generic_util_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.adam import Adam
from test.generic.optim_test_util import TestOptimizer
class TestAdamOptimizer(TestOptimizer, unittest.TestCase):
def _check_momentum_buffer(self):
return False
def _get_config(self):
return {
"name": "adam",
"num_epochs": 90,
"lr": 0.1,
"betas": (0.9, 0.99),
"eps": 1e-8,
"weight_decay": 0.0001,
"amsgrad": False,
}
def _instance_to_test(self):
return Adam
| ClassyVision-main | test/optim_adam_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import torch
import torchvision.models
from classy_vision.generic.util import get_torch_version
from classy_vision.models import build_model, ResNeXt
from test.generic.utils import compare_model_state
MODELS = {
"small_resnext": {
"name": "resnext",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"reduction": 4,
"base_width_and_cardinality": [2, 32],
"small_input": True,
"zero_init_bn_residuals": True,
"basic_layer": True,
"final_bn_relu": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-0",
"in_plane": 128,
}
],
},
"small_resnet": {
"name": "resnet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"reduction": 4,
"small_input": True,
"zero_init_bn_residuals": True,
"basic_layer": True,
"final_bn_relu": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-0",
"in_plane": 128,
}
],
},
"small_resnet_se": {
"name": "resnet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"reduction": 4,
"small_input": True,
"zero_init_bn_residuals": True,
"basic_layer": True,
"final_bn_relu": True,
"use_se": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-0",
"in_plane": 128,
}
],
},
}
def _find_block_full_path(model, block_name):
"""Find the full path for a given block name
e.g. block3-1 --> 3.block3-1
"""
for name, _ in model.named_modules():
if name.endswith(block_name):
return name
return None
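# Illustrative behavior of the helper above: if model.named_modules() yields a
# module named "3.block3-1", then _find_block_full_path(model, "block3-1")
# returns "3.block3-1"; if no module name ends with the requested block name it
# returns None.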
def _post_training_quantize(model, input):
if get_torch_version() >= [1, 11]:
import torch.ao.quantization as tq
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
else:
import torch.quantization as tq
from torch.quantization.quantize_fx import convert_fx, prepare_fx
model.eval()
fqn_to_example_inputs = None
if get_torch_version() >= [1, 13]:
from torch.ao.quantization.utils import get_fqn_to_example_inputs
fqn_to_example_inputs = get_fqn_to_example_inputs(model, (input,))
heads = model.get_heads()
    # since prepare_fx changes the code of ClassyBlock, we need to clear the heads
    # first and reattach them later to avoid caching
model.clear_heads()
prepare_custom_config_dict = {}
head_path_from_blocks = [
_find_block_full_path(model.blocks, block_name) for block_name in heads.keys()
]
    # we need to keep the modules used in the heads standalone since
    # they are accessed directly by path name during execution
if get_torch_version() >= [1, 13]:
prepare_custom_config_dict["standalone_module_name"] = [
(
head,
tq.get_default_qconfig_mapping("fbgemm"),
fqn_to_example_inputs["blocks." + head],
{"input_quantized_idxs": [0], "output_quantized_idxs": []},
None,
)
for head in head_path_from_blocks
]
else:
standalone_example_inputs = (torch.rand(1, 3, 3, 3),)
prepare_custom_config_dict["standalone_module_name"] = [
(
head,
{"": tq.default_qconfig},
standalone_example_inputs,
{"input_quantized_idxs": [0], "output_quantized_idxs": []},
None,
)
for head in head_path_from_blocks
]
example_inputs = (torch.rand(1, 3, 3, 3),)
if get_torch_version() >= [1, 13]:
example_inputs = fqn_to_example_inputs["initial_block"]
model.initial_block = prepare_fx(
model.initial_block, tq.get_default_qconfig_mapping("fbgemm"), example_inputs
)
if get_torch_version() >= [1, 13]:
example_inputs = fqn_to_example_inputs["blocks"]
model.blocks = prepare_fx(
model.blocks,
tq.get_default_qconfig_mapping("fbgemm"),
example_inputs,
prepare_custom_config_dict,
)
model.set_heads(heads)
# calibration
model(input)
heads = model.get_heads()
model.clear_heads()
model.initial_block = convert_fx(model.initial_block)
model.blocks = convert_fx(model.blocks)
model.set_heads(heads)
return model
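# Minimal usage sketch for the helper above (it mirrors _test_quantize_model
# below; the model config and input shape are only examples):
#   model = build_model(MODELS["small_resnet"])
#   calibration_input = torch.ones([1, 3, 32, 32])
#   model = _post_training_quantize(model, calibration_input)
# The calibration forward pass inside the helper populates the observers that
# prepare_fx inserted, and convert_fx then uses those statistics to choose the
# quantization parameters.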
class TestResnext(unittest.TestCase):
def _test_model(self, model_config):
"""This test will build ResNeXt-* models, run a forward pass and
verify output shape, and then verify that get / set state
works.
        We do this in one test so that we construct the model a minimal
        number of times.
"""
model = build_model(model_config)
# Verify forward pass works
input = torch.ones([1, 3, 32, 32])
output = model.forward(input)
self.assertEqual(output.size(), (1, 1000))
# Verify get_set_state
new_model = build_model(model_config)
state = model.get_classy_state()
new_model.set_classy_state(state)
new_state = new_model.get_classy_state()
compare_model_state(self, state, new_state, check_heads=True)
def _test_quantize_model(self, model_config):
"""This test will build ResNeXt-* models, quantize the model
with fx graph mode quantization, run a forward pass and
verify output shape, and then verify that get / set state
works.
"""
model = build_model(model_config)
# Verify forward pass works
input = torch.ones([1, 3, 32, 32])
output = model.forward(input)
self.assertEqual(output.size(), (1, 1000))
model = _post_training_quantize(model, input)
# Verify forward pass works
input = torch.ones([1, 3, 32, 32])
output = model.forward(input)
self.assertEqual(output.size(), (1, 1000))
# Verify get_set_state
new_model = build_model(model_config)
new_model = _post_training_quantize(new_model, input)
state = model.get_classy_state()
new_model.set_classy_state(state)
        # TODO: test get state for new_model and make sure it is the same as
        # state. allclose is currently not supported for quantized tensors,
        # so we can't check this right now
def test_build_preset_model(self):
configs = [
{"name": "resnet18", "use_se": True},
{
"name": "resnet50",
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
{
"name": "resnext50_32x4d",
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
]
for config in configs:
model = build_model(config)
self.assertIsInstance(model, ResNeXt)
def test_small_resnext(self):
self._test_model(MODELS["small_resnext"])
@unittest.skipIf(
get_torch_version() < [1, 13],
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13",
)
def test_quantized_small_resnext(self):
self._test_quantize_model(MODELS["small_resnext"])
def test_small_resnet(self):
self._test_model(MODELS["small_resnet"])
@unittest.skipIf(
get_torch_version() < [1, 13],
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13",
)
def test_quantized_small_resnet(self):
self._test_quantize_model(MODELS["small_resnet"])
def test_small_resnet_se(self):
self._test_model(MODELS["small_resnet_se"])
@unittest.skipIf(
get_torch_version() < [1, 13],
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13",
)
def test_quantized_small_resnet_se(self):
self._test_quantize_model(MODELS["small_resnet_se"])
class TestTorchvisionEquivalence(unittest.TestCase):
@staticmethod
def tensor_sizes(state):
size_count = collections.defaultdict(int)
for key, value in state.items():
if key.startswith("fc."):
continue # "head" for torchvision
size_count[value.size()] += 1
return dict(size_count)
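    # For example, a trunk state with two (64, 3, 7, 7) conv weights and one
    # (64,) bias (hypothetical shapes) would be summarized as
    # {torch.Size([64, 3, 7, 7]): 2, torch.Size([64]): 1}.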
def assert_tensor_sizes_match_torchvision(self, model_name):
classy_model = build_model({"name": model_name})
torchvision_model = getattr(torchvision.models, model_name)(pretrained=False)
classy_sizes = self.tensor_sizes(
classy_model.get_classy_state()["model"]["trunk"]
)
torchvision_sizes = self.tensor_sizes(torchvision_model.state_dict())
self.assertEqual(
classy_sizes,
torchvision_sizes,
f"{model_name} tensor shapes do not match torchvision",
)
def test_resnet18(self):
"""Resnet18 tensor shapes should match torchvision."""
self.assert_tensor_sizes_match_torchvision("resnet18")
def test_resnet34(self):
"""Resnet34 tensor shapes should match torchvision."""
self.assert_tensor_sizes_match_torchvision("resnet34")
def test_resnet50(self):
"""Resnet50 tensor shapes should match torchvision."""
self.assert_tensor_sizes_match_torchvision("resnet50")
def test_resnext50_32x4d(self):
"""Resnext50_32x4d tensor shapes should match torchvision."""
self.assert_tensor_sizes_match_torchvision("resnext50_32x4d")
| ClassyVision-main | test/models_resnext_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
)
from classy_vision.dataset.transforms.util import build_field_transform_default_imagenet
class LightingTransformTest(unittest.TestCase):
def get_test_image_dataset(self):
return RandomImageBinaryClassDataset(
crop_size=224, class_ratio=0.5, num_samples=100, seed=0
)
def test_lighting_transform_no_errors(self):
"""
Tests that the lighting transform runs without any errors.
"""
dataset = self.get_test_image_dataset()
config = [{"name": "ToTensor"}, {"name": "lighting"}]
transform = build_field_transform_default_imagenet(config)
sample = dataset[0]
try:
# test that lighting has been registered and runs without errors
transform(sample)
except Exception:
self.fail("LightingTransform raised an exception")
return
| ClassyVision-main | test/dataset_transforms_lighting_transform_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import unittest.mock as mock
from itertools import product
from classy_vision.hooks import ClassyHook, LossLrMeterLoggingHook
from classy_vision.optim.param_scheduler import ClassyParamScheduler, UpdateInterval
from classy_vision.tasks import build_task, ClassyTask
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_test_mlp_task_config, get_test_task_config
from test.generic.hook_test_utils import HookTestBase
class TestLossLrMeterLoggingHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {"log_freq": 1}
invalid_config = copy.deepcopy(config)
invalid_config["log_freq"] = "this is not an int"
self.constructor_test_helper(
config=config,
hook_type=LossLrMeterLoggingHook,
hook_registry_name="loss_lr_meter_logging",
invalid_configs=[invalid_config],
)
@mock.patch("classy_vision.hooks.loss_lr_meter_logging_hook.get_rank")
def test_logging(self, mock_get_rank: mock.MagicMock) -> None:
"""
Test that the logging happens as expected and the loss and lr values are
correct.
"""
rank = 5
mock_get_rank.return_value = rank
# set up the task and state
config = get_test_task_config()
config["dataset"]["train"]["batchsize_per_replica"] = 2
config["dataset"]["test"]["batchsize_per_replica"] = 5
task = build_task(config)
task.prepare()
task.on_start()
task.on_phase_start()
losses = [1.2, 2.3, 3.4, 4.5]
task.phase_idx = 0
for log_freq in [5, None]:
# create a loss lr meter hook
loss_lr_meter_hook = LossLrMeterLoggingHook(log_freq=log_freq)
# check that _log_loss_lr_meters() is called after on_step() every
# log_freq batches and after on_phase_end()
with mock.patch.object(
loss_lr_meter_hook, "_log_loss_lr_meters"
) as mock_fn:
num_batches = 20
for i in range(num_batches):
task.losses = list(range(i))
loss_lr_meter_hook.on_step(task)
if log_freq is not None and i and i % log_freq == 0:
mock_fn.assert_called()
mock_fn.reset_mock()
continue
mock_fn.assert_not_called()
loss_lr_meter_hook.on_phase_end(task)
mock_fn.assert_called()
# test _log_loss_lr_meters()
task.losses = losses
with self.assertLogs():
loss_lr_meter_hook._log_loss_lr_meters(task)
task.phase_idx += 1
def test_logged_lr(self):
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
return where
mock_lr_scheduler = SchedulerMock(UpdateInterval.STEP)
config = get_test_mlp_task_config()
config["num_epochs"] = 3
config["dataset"]["train"]["batchsize_per_replica"] = 10
config["dataset"]["test"]["batchsize_per_replica"] = 5
task = build_task(config)
task.set_optimizer_schedulers({"lr": mock_lr_scheduler})
trainer = LocalTrainer()
# 2 LR updates per epoch = 6
lr_order = [0.0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6]
lr_list = []
class LRLoggingHook(ClassyHook):
on_end = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_start = ClassyHook._noop
def on_step(self, task):
if task.train:
lr_list.append(task.optimizer.options_view.lr)
hook = LRLoggingHook()
task.set_hooks([hook])
trainer.train(task)
self.assertEqual(lr_list, lr_order)
| ClassyVision-main | test/hooks_loss_lr_meter_logging_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import random
import unittest
import numpy
import torch
import torchvision.transforms as transforms
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import build_transforms
from classy_vision.dataset.transforms.util import (
build_field_transform_default_imagenet,
GenericImageTransform,
ImagenetAugmentTransform,
ImagenetNoAugmentTransform,
)
def _apply_transform_to_key_and_copy(sample, transform, key, seed=0):
"""
This helper function takes a sample, makes a copy, applies the
provided transform to the appropriate key in the copied sample and
    returns the copy. Its sole purpose is to make sure the copying and
    random seeding happen consistently throughout the file.
It is useful for constructing the expected sample field in the
transform checks.
"""
expected_sample = copy.deepcopy(sample)
torch.manual_seed(seed)
numpy.random.seed(seed)
random.seed(seed)
is_tuple = False
if isinstance(expected_sample, tuple):
expected_sample = list(expected_sample)
is_tuple = True
expected_sample[key] = transform(expected_sample[key])
return tuple(expected_sample) if is_tuple else expected_sample
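# Illustrative use with a hypothetical sample: for sample = {"input": pil_image,
# "target": 0}, _apply_transform_to_key_and_copy(sample, transforms.ToTensor(),
# "input") returns a deep copy whose "input" is now a tensor while "target" is
# left untouched; the original sample is not modified.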
class DatasetTransformsUtilTest(unittest.TestCase):
def get_test_image_dataset(self, sample_type):
return RandomImageBinaryClassDataset(
crop_size=224,
class_ratio=0.5,
num_samples=100,
seed=0,
sample_type=sample_type,
)
def transform_checks(self, sample, transform, expected_sample, seed=0):
"""
This helper function applies the transform to the sample
and verifies that the output is the expected_sample. The
        sole purpose is to make sure that copying, random seeding, and checking
        all of the fields in the sample happen correctly.
"""
transformed_sample = copy.deepcopy(sample)
torch.manual_seed(seed)
numpy.random.seed(seed)
random.seed(seed)
transformed_sample = transform(transformed_sample)
if isinstance(expected_sample, (tuple, list)):
for transformed, exp in zip(transformed_sample, expected_sample):
if torch.is_tensor(exp):
self.assertTrue(torch.allclose(transformed, exp))
if isinstance(expected_sample, dict):
for key, exp_val in expected_sample.items():
self.assertTrue(key in transformed_sample)
if torch.is_tensor(exp_val):
self.assertTrue(torch.allclose(transformed_sample[key], exp_val))
elif isinstance(exp_val, float):
self.assertAlmostEqual(transformed_sample[key], exp_val)
else:
self.assertEqual(transformed_sample[key], exp_val)
def test_build_dict_field_transform_default_imagenet(self):
dataset = self.get_test_image_dataset(SampleType.DICT)
# should apply the transform in the config
config = [{"name": "ToTensor"}]
default_transform = transforms.Compose(
[transforms.CenterCrop(100), transforms.ToTensor()]
)
transform = build_field_transform_default_imagenet(
config, default_transform=default_transform
)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
sample, transforms.ToTensor(), "input"
)
self.transform_checks(sample, transform, expected_sample)
# should apply default_transform
config = None
transform = build_field_transform_default_imagenet(
config, default_transform=default_transform
)
expected_sample = _apply_transform_to_key_and_copy(
sample, default_transform, "input"
)
self.transform_checks(sample, transform, expected_sample)
# should apply the transform for a test split
transform = build_field_transform_default_imagenet(config, split="test")
expected_sample = _apply_transform_to_key_and_copy(
sample, ImagenetNoAugmentTransform(), "input"
)
self.transform_checks(sample, transform, expected_sample)
def test_build_tuple_field_transform_default_imagenet(self):
dataset = self.get_test_image_dataset(SampleType.TUPLE)
# should apply the transform in the config
config = [{"name": "ToTensor"}]
default_transform = transforms.Compose(
[transforms.CenterCrop(100), transforms.ToTensor()]
)
transform = build_field_transform_default_imagenet(
config, default_transform=default_transform, key=0, key_map_transform=None
)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
sample, transforms.ToTensor(), 0
)
self.transform_checks(sample, transform, expected_sample)
# should apply default_transform
config = None
transform = build_field_transform_default_imagenet(
config, default_transform=default_transform, key=0, key_map_transform=None
)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(sample, default_transform, 0)
self.transform_checks(sample, transform, expected_sample)
# should apply the transform for a test split
transform = build_field_transform_default_imagenet(
config, split="test", key=0, key_map_transform=None
)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
sample, ImagenetNoAugmentTransform(), 0
)
self.transform_checks(sample, transform, expected_sample)
def test_apply_transform_to_key_from_config(self):
dataset = self.get_test_image_dataset(SampleType.DICT)
config = [
{
"name": "apply_transform_to_key",
"transforms": [{"name": "ToTensor"}],
"key": "input",
}
]
transform = build_transforms(config)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
sample, transforms.ToTensor(), "input"
)
self.transform_checks(sample, transform, expected_sample)
def test_generic_image_transform(self):
dataset = self.get_test_image_dataset(SampleType.TUPLE)
# Check constructor asserts
with self.assertRaises(AssertionError):
transform = GenericImageTransform(
split="train", transform=transforms.ToTensor()
)
transform = GenericImageTransform(split="valid", transform=None)
# Check class constructor
transform = GenericImageTransform(transform=None)
PIL_sample = dataset[0]
tensor_sample = (transforms.ToTensor()(PIL_sample[0]), PIL_sample[1])
expected_sample = {
"input": copy.deepcopy(tensor_sample[0]),
"target": copy.deepcopy(tensor_sample[1]),
}
self.transform_checks(tensor_sample, transform, expected_sample)
transform = GenericImageTransform(transform=transforms.ToTensor())
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]}, transforms.ToTensor(), "input"
)
self.transform_checks(sample, transform, expected_sample)
transform = GenericImageTransform(split="train")
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]},
ImagenetAugmentTransform(),
"input",
)
self.transform_checks(sample, transform, expected_sample)
transform = GenericImageTransform(split="test")
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]},
ImagenetNoAugmentTransform(),
"input",
)
self.transform_checks(sample, transform, expected_sample)
# Check from_config constructor / registry
config = [
{"name": "generic_image_transform", "transforms": [{"name": "ToTensor"}]}
]
transform = build_transforms(config)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]}, transforms.ToTensor(), "input"
)
self.transform_checks(sample, transform, expected_sample)
# Check with Imagenet defaults
config = [{"name": "generic_image_transform", "split": "train"}]
transform = build_transforms(config)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]},
ImagenetAugmentTransform(),
"input",
)
self.transform_checks(sample, transform, expected_sample)
config = [{"name": "generic_image_transform", "split": "test"}]
transform = build_transforms(config)
sample = dataset[0]
expected_sample = _apply_transform_to_key_and_copy(
{"input": sample[0], "target": sample[1]},
ImagenetNoAugmentTransform(),
"input",
)
self.transform_checks(sample, transform, expected_sample)
| ClassyVision-main | test/dataset_transforms_util_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.heads import build_head, ClassyHead, register_head
class TestClassyHead(unittest.TestCase):
@register_head("dummy_head")
class DummyHead(ClassyHead):
def __init__(self, unique_id, num_classes, in_plane):
super().__init__(unique_id, num_classes)
self.fc = torch.nn.Linear(in_plane, num_classes)
def forward(self, x):
return self.fc(x)
@classmethod
def from_config(cls, config):
return cls(config["unique_id"], config["num_classes"], config["in_plane"])
def _get_config(self):
return {
"name": "dummy_head",
"num_classes": 3,
"unique_id": "cortex_dummy_head",
"fork_block": "block3",
"in_plane": 2048,
}
def test_build_head(self):
config = self._get_config()
head = build_head(config)
self.assertEqual(head.unique_id, config["unique_id"])
del config["unique_id"]
with self.assertRaises(AssertionError):
head = build_head(config)
def test_forward(self):
config = self._get_config()
head = build_head(config)
input = torch.randn(1, config["in_plane"])
output = head(input)
self.assertEqual(output.size(), torch.Size([1, 3]))
def _get_pass_through_config(self):
return {
"name": "identity",
"num_classes": 3,
"unique_id": "cortex_pass_through_head",
"fork_block": "block3",
"in_plane": 4,
}
def test_identity_forward(self):
config = self._get_pass_through_config()
head = build_head(config)
input = torch.randn(1, config["in_plane"])
output = head(input)
self.assertEqual(input.size(), output.size())
        self.assertTrue(torch.all(torch.eq(input, output)))
| ClassyVision-main | test/classy_vision_head_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import get_torch_version
from classy_vision.models import build_model, ClassyModel
class TestMLPModel(unittest.TestCase):
def test_build_model(self):
config = {"name": "mlp", "input_dim": 3, "output_dim": 1, "hidden_dims": [2]}
model = build_model(config)
self.assertTrue(isinstance(model, ClassyModel))
tensor = torch.tensor([[1, 2, 3]], dtype=torch.float)
output = model.forward(tensor)
self.assertEqual(output.shape, torch.Size([1, 1]))
tensor = torch.tensor([[1, 2, 3], [1, 2, 3]], dtype=torch.float)
output = model.forward(tensor)
self.assertEqual(output.shape, torch.Size([2, 1]))
@unittest.skipIf(
get_torch_version() < [1, 13],
"This test is using a new api of FX Graph Mode Quantization which is only available after 1.13",
)
def test_quantize_model(self):
import torch.ao.quantization as tq
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
config = {"name": "mlp", "input_dim": 3, "output_dim": 1, "hidden_dims": [2]}
model = build_model(config)
self.assertTrue(isinstance(model, ClassyModel))
model.eval()
example_inputs = (torch.rand(1, 3),)
model.mlp = prepare_fx(model.mlp, {"": tq.default_qconfig}, example_inputs)
model.mlp = convert_fx(model.mlp)
tensor = torch.tensor([[1, 2, 3]], dtype=torch.float)
output = model.forward(tensor)
self.assertEqual(output.shape, torch.Size([1, 1]))
tensor = torch.tensor([[1, 2, 3], [1, 2, 3]], dtype=torch.float)
output = model.forward(tensor)
self.assertEqual(output.shape, torch.Size([2, 1]))
| ClassyVision-main | test/models_mlp_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import importlib
import os
import shutil
import tempfile
import torch
from classy_vision.generic.util import load_checkpoint
from classy_vision.hooks import CheckpointHook
from classy_vision.tasks import build_task
from classy_vision.trainer import LocalTrainer
from fvcore.common import file_io
from fvcore.common.file_io import PathHandler, PathManager
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from test.generic.hook_test_utils import HookTestBase
class TestException(Exception):
pass
class TestPathHandler(PathHandler):
PREFIX = "test://"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _exists(self, *args, **kwargs):
return True
def _isdir(self, *args, **kwargs):
return True
def _open(self, *args, **kwargs):
raise TestException()
class TestCheckpointHook(HookTestBase):
def setUp(self) -> None:
self.base_dir = tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.base_dir)
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {
"checkpoint_folder": "/test/",
"input_args": {"foo": "bar"},
"phase_types": ["train"],
"checkpoint_period": 2,
}
invalid_config = copy.deepcopy(config)
invalid_config["checkpoint_folder"] = 12
self.constructor_test_helper(
config=config,
hook_type=CheckpointHook,
hook_registry_name="checkpoint",
invalid_configs=[invalid_config],
)
def test_failure(self) -> None:
self.assertFalse(PathManager.exists("test://foo"))
PathManager.register_handler(TestPathHandler())
# make sure that TestPathHandler is being used
self.assertTrue(PathManager.exists("test://foo"))
checkpoint_folder = "test://root"
checkpoint_hook = CheckpointHook(checkpoint_folder, {}, phase_types=["train"])
config = get_test_task_config()
task = build_task(config)
task.prepare()
# we should raise an exception while trying to save the checkpoint
with self.assertRaises(TestException):
checkpoint_hook.on_phase_end(task)
def test_state_checkpointing(self) -> None:
"""
Test that the state gets checkpointed without any errors, but only on the
right phase_type and only if the checkpoint directory exists.
"""
config = get_test_task_config()
task = build_task(config)
task.prepare()
checkpoint_folder = self.base_dir + "/checkpoint_end_test/"
input_args = {"foo": "bar"}
# create a checkpoint hook
checkpoint_hook = CheckpointHook(
checkpoint_folder, input_args, phase_types=["train"]
)
# checkpoint directory doesn't exist
# call the on start function
with self.assertRaises(FileNotFoundError):
checkpoint_hook.on_start(task)
# call the on end phase function
with self.assertRaises(AssertionError):
checkpoint_hook.on_phase_end(task)
# try loading a non-existent checkpoint
checkpoint = load_checkpoint(checkpoint_folder)
self.assertIsNone(checkpoint)
# create checkpoint dir, verify on_start hook runs
os.mkdir(checkpoint_folder)
checkpoint_hook.on_start(task)
# Phase_type is test, expect no checkpoint
task.train = False
# call the on end phase function
checkpoint_hook.on_phase_end(task)
checkpoint = load_checkpoint(checkpoint_folder)
self.assertIsNone(checkpoint)
task.train = True
# call the on end phase function
checkpoint_hook.on_phase_end(task)
# model should be checkpointed. load and compare
checkpoint = load_checkpoint(checkpoint_folder)
self.assertIsNotNone(checkpoint)
for key in ["input_args", "classy_state_dict"]:
self.assertIn(key, checkpoint)
# not testing for equality of classy_state_dict, that is tested in
# a separate test
self.assertDictEqual(checkpoint["input_args"], input_args)
def test_checkpoint_period(self) -> None:
"""
Test that the checkpoint_period works as expected.
"""
config = get_test_task_config()
task = build_task(config)
task.prepare()
checkpoint_folder = self.base_dir + "/checkpoint_end_test/"
checkpoint_period = 10
for phase_types in [["train"], ["train", "test"]]:
# create a checkpoint hook
checkpoint_hook = CheckpointHook(
checkpoint_folder,
{},
phase_types=phase_types,
checkpoint_period=checkpoint_period,
)
# create checkpoint dir
os.mkdir(checkpoint_folder)
# call the on start function
checkpoint_hook.on_start(task)
# shouldn't create any checkpoints until there are checkpoint_period
# phases which are in phase_types
count = 0
valid_phase_count = 0
while valid_phase_count < checkpoint_period - 1:
task.train = count % 2 == 0
# call the on end phase function
checkpoint_hook.on_phase_end(task)
checkpoint = load_checkpoint(checkpoint_folder)
self.assertIsNone(checkpoint)
valid_phase_count += 1 if task.phase_type in phase_types else 0
count += 1
# create a phase which is in phase_types
task.train = True
# call the on end phase function
checkpoint_hook.on_phase_end(task)
# model should be checkpointed. load and compare
checkpoint = load_checkpoint(checkpoint_folder)
self.assertIsNotNone(checkpoint)
# delete the checkpoint dir
shutil.rmtree(checkpoint_folder)
def test_checkpointing(self):
# make checkpoint directory
checkpoint_folder = self.base_dir + "/checkpoint/"
os.mkdir(checkpoint_folder)
config = get_fast_test_task_config()
cuda_available = torch.cuda.is_available()
task = build_task(config)
task.prepare()
# create a checkpoint hook
checkpoint_hook = CheckpointHook(checkpoint_folder, {}, phase_types=["train"])
# call the on end phase function
checkpoint_hook.on_phase_end(task)
# we should be able to train a task using the checkpoint on all available
# devices
for use_gpu in {False, cuda_available}:
# load the checkpoint
checkpoint = load_checkpoint(checkpoint_folder)
# create a new task
task = build_task(config)
# set the checkpoint
task._set_checkpoint_dict(checkpoint)
task.set_use_gpu(use_gpu)
# we should be able to run the trainer using the checkpoint
trainer = LocalTrainer()
trainer.train(task)
| ClassyVision-main | test/hooks_checkpoint_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.dataset import build_dataset
from classy_vision.hooks import LossLrMeterLoggingHook
from classy_vision.losses import build_loss
from classy_vision.meters import AccuracyMeter
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer, build_optimizer_schedulers
from classy_vision.tasks import ClassificationTask
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_test_mlp_task_config
class TestLocalTrainer(unittest.TestCase):
def test_training(self):
"""Checks we can train a small MLP model."""
config = get_test_mlp_task_config()
task = (
ClassificationTask()
.set_num_epochs(10)
.set_loss(build_loss(config["loss"]))
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
.set_optimizer_schedulers(build_optimizer_schedulers(config["optimizer"]))
.set_meters([AccuracyMeter(topk=[1])])
.set_hooks([LossLrMeterLoggingHook()])
)
for split in ["train", "test"]:
dataset = build_dataset(config["dataset"][split])
task.set_dataset(dataset, split)
self.assertTrue(task is not None)
trainer = LocalTrainer()
trainer.train(task)
accuracy = task.meters[0].value["top_1"]
self.assertAlmostEqual(accuracy, 1.0)
| ClassyVision-main | test/trainer_local_trainer_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import build_loss, SoftTargetCrossEntropyLoss
class TestSoftTargetCrossEntropyLoss(unittest.TestCase):
def _get_config(self):
return {
"name": "soft_target_cross_entropy",
"ignore_index": -1,
"reduction": "mean",
}
def _get_outputs(self):
return torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0]])
def _get_targets(self):
return torch.tensor([[1, 0, 0, 0, 1]])
def _get_loss(self):
return 5.51097965
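    # Sanity check on the constant above: the targets [[1, 0, 0, 0, 1]]
    # normalize to [0.5, 0, 0, 0, 0.5], and logsumexp([1, 7, 0, 0, 2]) ~= 7.01098,
    # so the loss is -(0.5 * (1 - 7.01098) + 0.5 * (2 - 7.01098)) ~= 5.51098.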
def test_build_soft_target_cross_entropy(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, SoftTargetCrossEntropyLoss))
self.assertEqual(crit._ignore_index, -1)
self.assertEqual(crit._reduction, "mean")
def test_soft_target_cross_entropy(self):
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), self._get_loss())
# Verify ignore index works
outputs = self._get_outputs()
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097918)
def test_soft_target_cross_entropy_none_reduction(self):
# reduction mode is "none"
config = self._get_config()
config["reduction"] = "none"
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0], [4.0, 2.0, 1.0, 6.0, 0.5]])
targets = torch.tensor([[1, 0, 0, 0, 1], [0, 1, 0, 1, 0]])
loss = crit(outputs, targets)
self.assertEqual(loss.numel(), outputs.size(0))
def test_soft_target_cross_entropy_integer_label(self):
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = torch.tensor([4])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097918)
def test_unnormalized_soft_target_cross_entropy(self):
config = {
"name": "soft_target_cross_entropy",
"ignore_index": -1,
"reduction": "mean",
"normalize_targets": False,
}
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 11.0219593)
# Verify ignore index works
outputs = self._get_outputs()
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097965)
def test_ignore_row(self):
# If a sample has no valid targets, it should be ignored in the reduction.
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0], [4.0, 2.0, 1.0, 6.0, 0.5]])
targets = torch.tensor([[1, 0, 0, 0, 1], [-1, -1, -1, -1, -1]])
self.assertAlmostEqual(crit(outputs, targets).item(), self._get_loss())
def test_deep_copy(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, SoftTargetCrossEntropyLoss))
outputs = self._get_outputs()
targets = self._get_targets()
crit(outputs, targets)
crit2 = copy.deepcopy(crit)
self.assertAlmostEqual(crit2(outputs, targets).item(), self._get_loss())
| ClassyVision-main | test/losses_soft_target_cross_entropy_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import get_torch_version
from classy_vision.losses import build_loss
class CriterionsTest(unittest.TestCase):
"""
Test that build_transform is able to build torch losses correctly.
"""
def _test_loss(self, config, output, target, expected_loss):
# test that we are able to build losses from torch.nn.modules.loss
# and that they work correctly
crit = build_loss(config)
# test that the weights are set correctly
self.assertAlmostEqual(crit.weight.numpy().tolist(), [1.0, 1.0])
# test that the loss is computed correctly
self.assertAlmostEqual(crit(output, target).item(), expected_loss)
# verify ignore index works
if "ignore_index" in config:
if get_torch_version() < [1, 11]:
self.assertAlmostEqual(crit(output, torch.tensor([-1])).item(), 0.0)
else:
self.assertTrue(torch.isnan(crit(output, torch.tensor([-1]))).item())
def test_cross_entropy_loss(self):
"""
Test CrossEntropyLoss
"""
config = {
"name": "CrossEntropyLoss",
"weight": [1.0, 1.0],
"ignore_index": -1,
"reduction": "mean",
}
output = torch.tensor([[9.0, 1.0]])
target = torch.tensor([1])
expected_loss = 8.000335693359375
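        # the expected value is -log_softmax([9.0, 1.0])[1]:
        # logsumexp([9, 1]) ~= 9.000336, so the loss is 9.000336 - 1 ~= 8.000336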
self._test_loss(config, output, target, expected_loss)
def test_bce_with_logits_loss(self):
"""
Test BCEWithLogitsLoss
"""
config = {
"name": "BCEWithLogitsLoss",
"weight": [1.0, 1.0],
"reduction": "mean",
}
output = torch.tensor([0.999, 0.999])
target = torch.tensor([1.0, 1.0])
expected_loss = 0.313530727260701
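        # per element the loss is -log(sigmoid(0.999)) = log(1 + exp(-0.999)) ~= 0.31353,
        # and the mean over the two identical elements keeps the same value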
self._test_loss(config, output, target, expected_loss)
| ClassyVision-main | test/losses_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import sys
import unittest
import classy_vision
from classy_vision.dataset.transforms import GenericImageTransform
from classy_vision.optim import SGD
from classy_vision.optim.param_scheduler import LinearParamScheduler
from classy_vision.tasks import ClassificationTask
from classy_vision.trainer import LocalTrainer
from torchvision import transforms
# import the classes from the synthetic template
path = pathlib.Path(classy_vision.__file__).resolve().parent
synthetic_template_path = path / "templates" / "synthetic"
sys.path.append(str(synthetic_template_path))
from datasets.my_dataset import MyDataset # isort:skip
from losses.my_loss import MyLoss # isort:skip
from models.my_model import MyModel # isort:skip
# WARNING: The goal of this test is to use our public API as advertised in our
# tutorials and make sure everything trains successfully. If you break this
# test, make sure you also update our tutorials.
class APITest(unittest.TestCase):
def test_one(self):
train_dataset = MyDataset(
batchsize_per_replica=32,
shuffle=False,
transform=GenericImageTransform(
transform=transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
),
num_samples=100,
crop_size=224,
class_ratio=0.5,
seed=0,
)
test_dataset = MyDataset(
batchsize_per_replica=32,
shuffle=False,
transform=GenericImageTransform(
transform=transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
),
num_samples=100,
crop_size=224,
class_ratio=0.5,
seed=0,
)
model = MyModel()
loss = MyLoss()
optimizer = SGD(momentum=0.9, weight_decay=1e-4, nesterov=True)
task = (
ClassificationTask()
.set_model(model)
.set_dataset(train_dataset, "train")
.set_dataset(test_dataset, "test")
.set_loss(loss)
.set_optimizer(optimizer)
.set_optimizer_schedulers(
{"lr": LinearParamScheduler(start_value=0.01, end_value=0.009)}
)
.set_num_epochs(1)
)
trainer = LocalTrainer()
trainer.train(task)
| ClassyVision-main | test/api_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.models import build_model
from test.generic.utils import compare_model_state
class TestVisionTransformer(unittest.TestCase):
def get_vit_b_16_224_config(self):
return {
"name": "vision_transformer",
"image_size": 224,
"patch_size": 16,
"hidden_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"num_layers": 12,
"attention_dropout_rate": 0,
"dropout_rate": 0.1,
"heads": [
{
"name": "vision_transformer_head",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 768,
"hidden_dim": 3072,
}
],
}
def get_vitc_b_16_224_config(self):
return {
"name": "vision_transformer",
"image_size": 224,
"patch_size": 16,
"hidden_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"num_layers": 12,
"conv_stem_layers": [
{"kernel": 3, "stride": 2, "out_channels": 64},
{"kernel": 3, "stride": 2, "out_channels": 128},
{"kernel": 3, "stride": 1, "out_channels": 128},
{"kernel": 3, "stride": 2, "out_channels": 256},
{"kernel": 3, "stride": 1, "out_channels": 256},
{"kernel": 3, "stride": 2, "out_channels": 512},
],
"attention_dropout_rate": 0,
"dropout_rate": 0.1,
"heads": [
{
"name": "vision_transformer_head",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 768,
"hidden_dim": 3072,
}
],
}
def get_vit_l_32_224_config(self):
return {
"name": "vision_transformer",
"image_size": 224,
"patch_size": 32,
"hidden_dim": 1024,
"mlp_dim": 4096,
"num_heads": 16,
"num_layers": 24,
"attention_dropout_rate": 0,
"dropout_rate": 0.1,
"heads": [
{
"name": "vision_transformer_head",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 1024,
"hidden_dim": 4096,
}
],
}
def _test_model(
self,
model_config,
image_size=224,
expected_out_dims=1000,
test_forward_pass=True,
):
model = build_model(model_config)
if not test_forward_pass:
return
# Verify forward pass works
input = torch.ones([2, 3, image_size, image_size])
output = model.forward(input)
self.assertEqual(output.size(), (2, expected_out_dims))
# Verify get_set_state
new_model = build_model(model_config)
state = model.get_classy_state()
new_model.set_classy_state(state)
new_state = new_model.get_classy_state()
compare_model_state(self, state, new_state, check_heads=True)
def test_vit_b_16_224(self):
self._test_model(self.get_vit_b_16_224_config())
def test_vitc_b_16_224(self):
self._test_model(self.get_vitc_b_16_224_config())
def test_vit_l_32_224(self):
# testing the forward pass is slow so we skip it
self._test_model(self.get_vit_l_32_224_config(), test_forward_pass=False)
def test_all_presets(self):
for model_name, image_size, expected_out_dims in [
("vit_b_32", 32, 768),
("vit_b_16", 64, 768),
("vit_l_32", 32, 1024),
("vit_l_16", 32, 1024),
("vit_h_14", 14, 1280),
]:
# testing the forward pass is slow so we skip it
self._test_model(
{"name": model_name, "image_size": image_size},
image_size,
expected_out_dims,
test_forward_pass=False,
)
def test_resolution_change(self):
vit_b_16_224_config = self.get_vit_b_16_224_config()
vit_b_16_896_config = copy.deepcopy(vit_b_16_224_config)
vit_b_16_896_config["image_size"] = 896
vit_b_16_224_model = build_model(vit_b_16_224_config)
vit_b_16_896_model = build_model(vit_b_16_896_config)
# test state transfer from both resolutions
vit_b_16_224_model.set_classy_state(vit_b_16_896_model.get_classy_state())
vit_b_16_896_model.set_classy_state(vit_b_16_224_model.get_classy_state())
vit_b_16_448_config = copy.deepcopy(vit_b_16_224_config)
vit_b_16_448_config["image_size"] = 448
vit_b_16_448_model = build_model(vit_b_16_448_config)
# downsampling from 896 -> 448 -> 224 should give similar results to 896 -> 224
vit_b_16_448_model.set_classy_state(vit_b_16_896_model.get_classy_state())
vit_b_16_224_model.set_classy_state(vit_b_16_448_model.get_classy_state())
vit_b_16_224_model_2 = build_model(vit_b_16_224_config)
vit_b_16_224_model_2.set_classy_state(vit_b_16_896_model.get_classy_state())
# we should have similar position embeddings in both models
state_1 = vit_b_16_224_model.get_classy_state()["model"]["trunk"][
"encoder.pos_embedding"
]
state_2 = vit_b_16_224_model_2.get_classy_state()["model"]["trunk"][
"encoder.pos_embedding"
]
diff = state_1 - state_2
self.assertLess(diff.norm() / min(state_1.norm(), state_2.norm()), 0.1)
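        # the loose 10% tolerance accounts for the position embeddings presumably
        # being interpolated on each resolution change, so chained resizing
        # (896 -> 448 -> 224) only approximately matches direct resizing (896 -> 224)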
| ClassyVision-main | test/models_vision_transformer_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.tasks import build_task
from test.generic.config_utils import get_test_task_config
class TestDataloaderLimitWrapper(unittest.TestCase):
def _test_number_of_batches(self, data_iterator, expected_batches):
num_batches = 0
for _ in data_iterator:
num_batches += 1
self.assertEqual(num_batches, expected_batches)
def test_streaming_dataset(self):
"""
Test that streaming datasets return the correct number of batches, and that
the length is also calculated correctly.
"""
config = get_test_task_config()
dataset_config = {
"name": "synthetic_image_streaming",
"split": "train",
"crop_size": 224,
"class_ratio": 0.5,
"num_samples": 2000,
"length": 4000,
"seed": 0,
"batchsize_per_replica": 32,
"use_shuffle": True,
}
expected_batches = 62
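        # 62 = floor(2000 samples / 32 per replica); the trailing partial batch
        # is apparently not counted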
config["dataset"]["train"] = dataset_config
task = build_task(config)
task.prepare()
task.advance_phase()
# test that the number of batches expected is correct
self.assertEqual(task.num_batches_per_phase, expected_batches)
# test that the data iterator returns the expected number of batches
data_iterator = task.data_iterator
self._test_number_of_batches(data_iterator, expected_batches)
# test that the dataloader can be rebuilt
task.build_dataloaders_for_current_phase()
task.create_data_iterators()
data_iterator = task.data_iterator
self._test_number_of_batches(data_iterator, expected_batches)
| ClassyVision-main | test/dataset_dataloader_limit_wrapper_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from test.generic.utils import ClassyTestCase
class TestClassyTestCase(unittest.TestCase):
def test_assert_torch_all_close(self):
test_fixture = ClassyTestCase()
data = [1.1, 2.2]
tensor_1 = torch.Tensor(data)
# shouldn't raise an exception
tensor_2 = tensor_1
test_fixture.assertTorchAllClose(tensor_1, tensor_2)
# should fail because tensors are not close
tensor_2 = tensor_1 / 2
with self.assertRaises(AssertionError):
test_fixture.assertTorchAllClose(tensor_1, tensor_2)
# should fail because tensor_2 is not a tensor
tensor_2 = data
with self.assertRaises(AssertionError):
test_fixture.assertTorchAllClose(tensor_1, tensor_2)
# should fail because tensor_1 is not a tensor
tensor_1 = data
tensor_2 = torch.Tensor(data)
with self.assertRaises(AssertionError):
test_fixture.assertTorchAllClose(tensor_1, tensor_2)
| ClassyVision-main | test/test_generic_utils_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import unittest
import unittest.mock as mock
import torch
import torch.nn as nn
from classy_vision.hooks import ExponentialMovingAverageModelHook
from classy_vision.models import ClassyModel
from test.generic.hook_test_utils import HookTestBase
class TestModel(ClassyModel):
def __init__(self):
super().__init__()
self.fc = nn.Linear(10, 10)
self.bn = nn.BatchNorm1d(10)
def init_fc_weight(self):
nn.init.zeros_(self.fc.weight)
def update_fc_weight(self):
nn.init.ones_(self.fc.weight)
def forward(self, x):
return self.bn(self.fc(x))
class TestExponentialMovingAverageModelHook(HookTestBase):
def _map_device_string(self, device):
return "cuda" if device == "gpu" else "cpu"
def _test_exponential_moving_average_hook(self, model_device, hook_device):
task = mock.MagicMock()
model = TestModel().to(device=self._map_device_string(model_device))
task.base_model = model
task.train = True
decay = 0.5
num_updates = 10
model.init_fc_weight()
exponential_moving_average_hook = ExponentialMovingAverageModelHook(
decay=decay, device=hook_device
)
exponential_moving_average_hook.on_start(task)
exponential_moving_average_hook.on_phase_start(task)
# set the weights to all ones and simulate 10 updates
task.base_model.update_fc_weight()
fc_weight = model.fc.weight.clone()
for _ in range(num_updates):
exponential_moving_average_hook.on_step(task)
exponential_moving_average_hook.on_phase_end(task)
# the model weights shouldn't have changed
self.assertTrue(torch.allclose(model.fc.weight, fc_weight))
# simulate a test phase now
task.train = False
exponential_moving_average_hook.on_phase_start(task)
exponential_moving_average_hook.on_phase_end(task)
# the model weights should be updated to the ema weights
self.assertTrue(
torch.allclose(
model.fc.weight, fc_weight * (1 - math.pow(1 - decay, num_updates))
)
)
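        # sanity check on the factor above: assuming the EMA starts from the
        # zero-initialized weights and each update moves it a fraction `decay`
        # toward the current (all-ones) weights, after n updates it equals
        # 1 - (1 - decay) ** n, which is exactly the factor checked here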
# simulate a train phase again
task.train = True
exponential_moving_average_hook.on_phase_start(task)
# the model weights should be back to the old value
self.assertTrue(torch.allclose(model.fc.weight, fc_weight))
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {"decay": 0.5, "consider_bn_buffers": True, "device": "cpu"}
invalid_config1 = copy.deepcopy(config)
del invalid_config1["decay"]
invalid_config2 = copy.deepcopy(config)
invalid_config2["device"] = "crazy_hardware"
self.constructor_test_helper(
config=config,
hook_type=ExponentialMovingAverageModelHook,
hook_registry_name="ema_model_weights",
invalid_configs=[invalid_config1, invalid_config2],
)
def test_get_model_state_iterator(self):
device = "gpu" if torch.cuda.is_available() else "cpu"
model = TestModel().to(device=self._map_device_string(device))
decay = 0.5
# test that we pick up the right parameters in the iterator
for consider_bn_buffers in [True, False]:
exponential_moving_average_hook = ExponentialMovingAverageModelHook(
decay=decay, consider_bn_buffers=consider_bn_buffers, device=device
)
iterable = exponential_moving_average_hook.get_model_state_iterator(model)
fc_found = False
bn_found = False
bn_buffer_found = False
for _, param in iterable:
if any(param is item for item in model.fc.parameters()):
fc_found = True
if any(param is item for item in model.bn.parameters()):
bn_found = True
if any(param is item for item in model.bn.buffers()):
bn_buffer_found = True
self.assertTrue(fc_found)
self.assertTrue(bn_found)
self.assertEqual(bn_buffer_found, consider_bn_buffers)
def test_exponential_moving_average_hook(self):
device = "gpu" if torch.cuda.is_available() else "cpu"
self._test_exponential_moving_average_hook(device, device)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_mixed_devices(self):
"""Tests that the hook works when the model and hook's device are different"""
self._test_exponential_moving_average_hook("cpu", "gpu")
self._test_exponential_moving_average_hook("gpu", "cpu")
| ClassyVision-main | test/hooks_exponential_moving_average_model_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import PrecisionAtKMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestPrecisionAtKMeter(ClassificationMeterTest):
def test_precision_meter_registry(self):
meter = meters.build_meter({"name": "precision_at_k", "topk": [1, 3]})
self.assertTrue(isinstance(meter, PrecisionAtKMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = PrecisionAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
                [0.33, 0.33, 0.34],  # top-1: 2, top-2: 2/0/1
]
)
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 3.0, "top_2": 4 / 6.0}
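        # judging by these expected values, precision@k counts the correct labels
        # found among the top-k predictions over all samples and divides by
        # k * num_samples (top-2 here: 4 correct out of 2 * 3 slots)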
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_double_meter_update_and_reset(self):
meter = PrecisionAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
# One-hot encoding, 1 = positive for class
# batch-1: sample-1: 1, sample-2: 0, sample-3: 0,1,2
# batch-2: sample-1: 1, sample-2: 1, sample-3: 1
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# First batch has top-1 precision of 2/3.0, top-2 precision of 4/6.0
# Second batch has top-1 precision of 2/3.0, top-2 precision of 2/6.0
expected_value = {"top_1": 4 / 6.0, "top_2": 6 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_invalid_model_output(self):
meter = PrecisionAtKMeter(topk=[1, 2])
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[0.33, 0.33, 0.34], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = PrecisionAtKMeter(topk=[1, 2])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
# Target shape is of length 3
target = torch.tensor([[[0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_topk(self):
meter = PrecisionAtKMeter(topk=[1, 5])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
        # In this test we update meter0 with model_output0 & target0 and
        # meter1 with model_output1 & target1, then transfer the state from
        # meter1 to meter0 and validate that they give the same expected
        # value.
        #
        # The expected value is that of meter1. For this test to work, the
        # top-1 / top-2 values of meter0 and meter1 should be different.
meters = [PrecisionAtKMeter(topk=[1, 2]), PrecisionAtKMeter(topk=[1, 2])]
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 0]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# Second update's expected value
expected_value = {"top_1": 2 / 3.0, "top_2": 2 / 6.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [PrecisionAtKMeter(topk=[1, 2]), PrecisionAtKMeter(topk=[1, 2])]
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
]
        # One-hot encoding, 1 = positive for class
        # meter 0 batches: sample-1: 1, sample-2: 0, sample-3: 0,1,2
        # meter 1 batches: every sample: 1
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
]
        # In the first two updates there are 4 correct top-1 and 6 correct
        # top-2 predictions. The same occurs in the second two updates and is
        # added to the first.
expected_values = [
{"top_1": 4 / 6.0, "top_2": 6 / 12.0}, # After one update to each meter
{"top_1": 8 / 12.0, "top_2": 12 / 24.0}, # After two updates to each meter
]
self.meter_distributed_test(meters, model_outputs, targets, expected_values)
def test_non_onehot_target(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = PrecisionAtKMeter(topk=[1, 2])
        # Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
        # Class-index targets (not one-hot); the inline comments show the
        # equivalent one-hot rows
targets = [
torch.tensor([[1], [1], [1]]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([[0], [1], [2]]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 precision of 2/3.0, top-2 precision of 2/6.0
# Second batch has top-1 precision of 1/3.0, top-2 precision of 1/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 3 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_non_onehot_target_one_dim_target(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update with one dimensional targets.
"""
meter = PrecisionAtKMeter(topk=[1, 2])
        # Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
        # One-dimensional class-index targets (not one-hot); the inline comments
        # show the equivalent one-hot rows
targets = [
torch.tensor([1, 1, 1]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([0, 1, 2]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 precision of 2/3.0, top-2 precision of 2/6.0
# Second batch has top-1 precision of 1/3.0, top-2 precision of 1/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 3 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_fp16(self):
"""
This test verifies that the meter works if the input tensor is fp16.
"""
meter = PrecisionAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
                [0.33, 0.33, 0.34],  # top-1: 2, top-2: 2/0/1
]
).half()
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]).half()
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 3.0, "top_2": 4 / 6.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
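# --- Illustrative sketch, not part of the original test suite ----------------------
# A minimal precision@k computation matching the arithmetic in the comments above:
# count how many of the k highest-scored classes per sample are positives in the
# one-hot target, then divide by (num_samples * k). The helper name is hypothetical
# (not a ClassyVision API) and ties are broken however torch.topk breaks them.
def _precision_at_k(scores, one_hot_target, k):
    _, top_idx = scores.topk(k, dim=1)
    hits = one_hot_target.gather(1, top_idx).sum().item()
    return hits / (scores.size(0) * k)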
| ClassyVision-main | test/meters_precision_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
ConstantParamScheduler,
)
class TestFixedScheduler(unittest.TestCase):
_num_epochs = 12
def _get_valid_config(self):
return {"name": "constant", "num_epochs": self._num_epochs, "value": 0.1}
def test_invalid_config(self):
        # Config is missing the required "value" key
config = self._get_valid_config()
bad_config = copy.deepcopy(config)
del bad_config["value"]
with self.assertRaises((AssertionError, TypeError)):
ConstantParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
scheduler = ConstantParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
self.assertEqual(schedule, expected_schedule)
        # The input for the scheduler should be in the half-open interval [0, 1)
with self.assertRaises(RuntimeError):
scheduler(1)
def test_build_constant_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, ConstantParamScheduler))
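# --- Illustrative sketch, not part of the original tests ---------------------------
# Param schedulers here are queried with a normalized progress fraction `where` in
# the half-open interval [0, 1); the loop above builds it as epoch_num / num_epochs,
# so `where` never reaches 1.0. A constant scheduler ignores `where` entirely.
def _epoch_to_where(epoch_num, num_epochs):
    return epoch_num / num_epochs  # e.g. 12 epochs -> 0.0, 1/12, ..., 11/12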
| ClassyVision-main | test/optim_param_scheduler_constant_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import shutil
import tempfile
import unittest
import torch
from classy_vision.dataset.transforms import ClassyTransform
from classy_vision.hub import ClassyHubInterface
from classy_vision.models import build_model, ClassyModel
from classy_vision.tasks import build_task, ClassyTask
from test.generic.config_utils import get_test_task_config
from torchvision import models, transforms
class TestTransform(ClassyTransform):
def __call__(self, x):
return x
class TestClassyHubInterface(unittest.TestCase):
def setUp(self):
# create a base directory to write image files to
self.base_dir = tempfile.mkdtemp()
self.image_path = self.base_dir + "/img.jpg"
# create an image with a non standard size
image_tensor = torch.zeros((3, 1000, 2500), dtype=torch.float)
transforms.ToPILImage()(image_tensor).save(self.image_path)
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def _test_predict_and_extract_features(self, hub_interface: ClassyHubInterface):
dataset = hub_interface.create_image_dataset(
image_files=[self.image_path], phase_type="test"
)
data_iterator = hub_interface.get_data_iterator(dataset)
input = next(data_iterator)
# set the model to eval mode
hub_interface.eval()
output = hub_interface.predict(input)
self.assertIsNotNone(output)
# see the prediction for the input
hub_interface.predict(input).argmax().item()
# check extract features
output = hub_interface.extract_features(input)
self.assertIsNotNone(output)
def _get_classy_model(self):
config = get_test_task_config()
model_config = config["model"]
return build_model(model_config)
def _get_non_classy_model(self):
return models.resnet18(pretrained=False)
def test_from_task(self):
config = get_test_task_config()
task = build_task(config)
hub_interface = ClassyHubInterface.from_task(task)
self.assertIsInstance(hub_interface.task, ClassyTask)
self.assertIsInstance(hub_interface.model, ClassyModel)
# this will pick up the transform from the task's config
self._test_predict_and_extract_features(hub_interface)
# test that the correct transform is picked up
phase_type = "test"
test_transform = TestTransform()
task.datasets[phase_type].transform = test_transform
hub_interface = ClassyHubInterface.from_task(task)
dataset = hub_interface.create_image_dataset(
image_files=[self.image_path], phase_type=phase_type
)
self.assertIsInstance(dataset.transform, TestTransform)
def test_from_model(self):
for model in [self._get_classy_model(), self._get_non_classy_model()]:
hub_interface = ClassyHubInterface.from_model(model)
self.assertIsNone(hub_interface.task)
self.assertIsInstance(hub_interface.model, ClassyModel)
# this will pick up the transform from imagenet
self._test_predict_and_extract_features(hub_interface)
| ClassyVision-main | test/hub_classy_hub_interface_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import shutil
import tempfile
import torch
from classy_vision.hooks import TorchscriptHook
from classy_vision.models import ResNet
from classy_vision.tasks import build_task
from test.generic.config_utils import get_test_task_config
from test.generic.hook_test_utils import HookTestBase
TORCHSCRIPT_FILE = "torchscript.pt"
class TestTorchscriptHook(HookTestBase):
def setUp(self) -> None:
self.base_dir = tempfile.mkdtemp()
self.orig_wrapper_cls = ResNet.wrapper_cls
def tearDown(self) -> None:
shutil.rmtree(self.base_dir)
ResNet.wrapper_cls = self.orig_wrapper_cls
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {"torchscript_folder": "/test/"}
invalid_config = copy.deepcopy(config)
invalid_config["torchscript_folder"] = 12
self.constructor_test_helper(
config=config,
hook_type=TorchscriptHook,
hook_registry_name="torchscript",
invalid_configs=[invalid_config],
)
def execute_hook(self, config, torchscript_folder, torchscript_hook) -> None:
task = build_task(config)
task.prepare()
# create checkpoint dir, verify on_start hook runs
os.mkdir(torchscript_folder)
torchscript_hook.on_start(task)
task.train = True
# call the on end function
torchscript_hook.on_end(task)
# load torchscript file
torchscript_file_name = (
f"{torchscript_hook.torchscript_folder}/{TORCHSCRIPT_FILE}"
)
torchscript = torch.jit.load(torchscript_file_name)
# compare model load from checkpoint vs torchscript
with torch.no_grad():
batchsize = 1
model = task.model
input_data = torch.randn(
(batchsize,) + model.input_shape, dtype=torch.float
)
if torch.cuda.is_available():
input_data = input_data.cuda()
model = model.cuda()
torchscript = torchscript.cuda()
checkpoint_out = model(input_data)
torchscript_out = torchscript(input_data)
self.assertTrue(torch.allclose(checkpoint_out, torchscript_out, atol=1e-5))
def test_torchscripting_using_trace(self):
"""
Test that the save_torchscript function works as expected with trace
"""
config = get_test_task_config()
torchscript_folder = self.base_dir + "/torchscript_end_test/"
# create a torchscript hook using trace
torchscript_hook = TorchscriptHook(torchscript_folder)
self.execute_hook(config, torchscript_folder, torchscript_hook)
def test_torchscripting_using_script(self):
"""
Test that the save_torchscript function works as expected with script
"""
config = get_test_task_config()
# Setting wrapper_cls to None to make ResNet model torchscriptable
ResNet.wrapper_cls = None
torchscript_folder = self.base_dir + "/torchscript_end_test/"
# create a torchscript hook using script
torchscript_hook = TorchscriptHook(torchscript_folder, use_trace=False)
self.execute_hook(config, torchscript_folder, torchscript_hook)
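# --- Illustrative sketch, not part of the original tests ---------------------------
# The two tests above exercise the trace-vs-script distinction: torch.jit.trace
# records the ops run on one example input, while torch.jit.script compiles the
# module itself. Either result can be saved and re-loaded with torch.jit.load,
# which is what the hook's output file is compared against.
def _to_torchscript(module, example_input, use_trace=True):
    return torch.jit.trace(module, example_input) if use_trace else torch.jit.script(module)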
| ClassyVision-main | test/hooks_torchscript_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import shutil
import tempfile
import unittest
import torch
import torch.nn as nn
from classy_vision.generic.util import load_checkpoint
from classy_vision.heads import FullyConnectedHead, IdentityHead
from classy_vision.hooks import CheckpointHook
from classy_vision.models import (
build_model,
ClassyModel,
ClassyModelWrapper,
register_model,
)
from classy_vision.models.classy_model import _ClassyModelAdapter
from classy_vision.tasks import build_task
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from torchvision import models
@register_model("my_test_model")
class MyTestModel(ClassyModel):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 5)
self.linear2 = nn.Linear(5, 10)
def forward(self, x):
return self.linear2(self.linear(x))
@classmethod
def from_config(cls, config):
return cls()
class MyTestModel2(ClassyModel):
def forward(self, x):
return x + 1
    # need to define this property to make the model torchscriptable
@property
def input_shape(self):
return (1, 2, 3)
class TestSimpleClassyModelWrapper(ClassyModelWrapper):
def forward(self, x):
return self.classy_model(x) * 2
class TestClassyModel(unittest.TestCase):
def setUp(self) -> None:
self.base_dir = tempfile.mkdtemp()
self.orig_wrapper_cls_1 = MyTestModel.wrapper_cls
self.orig_wrapper_cls_2 = MyTestModel2.wrapper_cls
def tearDown(self) -> None:
shutil.rmtree(self.base_dir)
MyTestModel.wrapper_cls = self.orig_wrapper_cls_1
MyTestModel2.wrapper_cls = self.orig_wrapper_cls_2
def get_model_config(self, use_head):
config = {"name": "my_test_model"}
if use_head:
config["heads"] = [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 3,
"fork_block": "linear",
"in_plane": 5,
}
]
return config
def test_model_bad_name(self):
config = self.get_model_config(use_head=True)
test_bad_name = "__test_bad_name__"
config["name"] = test_bad_name
try:
_ = build_model(config)
except AssertionError as e:
self.assertTrue(
test_bad_name in str(e),
f"expected {test_bad_name} in error message, got {str(e)}",
)
return
self.assertTrue(False, "expected an AssertionError to be thrown")
def test_from_checkpoint(self):
config = get_test_task_config()
for use_head in [True, False]:
config["model"] = self.get_model_config(use_head)
task = build_task(config)
task.prepare()
checkpoint_folder = f"{self.base_dir}/{use_head}/"
input_args = {"config": config}
# Simulate training by setting the model parameters to zero
for param in task.model.parameters():
param.data.zero_()
checkpoint_hook = CheckpointHook(
checkpoint_folder, input_args, phase_types=["train"]
)
# Create checkpoint dir, save checkpoint
os.mkdir(checkpoint_folder)
checkpoint_hook.on_start(task)
task.train = True
checkpoint_hook.on_phase_end(task)
# Model should be checkpointed. load and compare
checkpoint = load_checkpoint(checkpoint_folder)
model = ClassyModel.from_checkpoint(checkpoint)
self.assertTrue(isinstance(model, MyTestModel))
# All parameters must be zero
for param in model.parameters():
self.assertTrue(torch.all(param.data == 0))
def test_classy_model_wrapper_instance(self):
# Test that we return a ClassyModel without a wrapper_cls
MyTestModel.wrapper_cls = None
model = MyTestModel()
self.assertEqual(type(model), MyTestModel)
self.assertIsInstance(model, MyTestModel)
self.assertIsInstance(model, ClassyModel)
self.assertIsInstance(model, nn.Module)
# Test that we return a ClassyModelWrapper when specified as the wrapper_cls
        # The object should still pass the isinstance checks
MyTestModel.wrapper_cls = ClassyModelWrapper
model = MyTestModel()
self.assertEqual(type(model), ClassyModelWrapper)
self.assertIsInstance(model, MyTestModel)
self.assertIsInstance(model, ClassyModel)
self.assertIsInstance(model, nn.Module)
def test_classy_model_wrapper_torch_scriptable(self):
input = torch.ones((2, 2))
for wrapper_cls, expected_output in [
(None, input + 1),
# this isn't supported yet
# (TestSimpleClassyModelWrapper, (input + 1) * 2),
]:
MyTestModel2.wrapper_cls = wrapper_cls
model = MyTestModel2()
scripted_model = torch.jit.script(model)
self.assertTrue(torch.allclose(expected_output, model(input)))
self.assertTrue(torch.allclose(expected_output, scripted_model(input)))
def test_classy_model_wrapper_torch_jittable(self):
input = torch.ones((2, 2))
for wrapper_cls, expected_output in [
(None, input + 1),
(TestSimpleClassyModelWrapper, (input + 1) * 2),
]:
MyTestModel2.wrapper_cls = wrapper_cls
model = MyTestModel2()
jitted_model = torch.jit.trace(model, input)
self.assertTrue(torch.allclose(expected_output, model(input)))
self.assertTrue(torch.allclose(expected_output, jitted_model(input)))
def test_classy_model_wrapper_attr(self):
model = MyTestModel2()
model.test_attr = 123
model_wrapper = ClassyModelWrapper(model)
self.assertTrue(hasattr(model_wrapper, "test_attr"))
self.assertEqual(model_wrapper.test_attr, 123)
# delete the attr
delattr(model_wrapper, "test_attr")
self.assertFalse(hasattr(model_wrapper, "test_attr"))
self.assertFalse(hasattr(model, "test_attr"))
def test_classy_model_set_state_strict(self):
model_1 = build_model(self.get_model_config(use_head=True))
model_state_1 = model_1.get_classy_state(deep_copy=True)
model_2 = build_model(self.get_model_config(use_head=False))
model_2.set_heads({"linear": [IdentityHead("default_head")]})
with self.assertRaises(RuntimeError):
model_2.set_classy_state(model_state_1)
model_2.set_classy_state(model_state_1, strict=False)
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 5)
def forward(self, x):
return self.linear(x)
def extract_features(self, x):
return torch.cat([x, x], dim=1)
class TestClassyModelAdapter(unittest.TestCase):
def test_classy_model_adapter(self):
model = TestModel()
classy_model = ClassyModel.from_model(model)
# test that the returned object is an instance of ClassyModel
self.assertIsInstance(classy_model, ClassyModel)
# test that the returned object is also an instance of _ClassyModelAdapter
self.assertIsInstance(classy_model, _ClassyModelAdapter)
# test that forward works correctly
input = torch.zeros((100, 10))
output = classy_model(input)
self.assertEqual(output.shape, (100, 5))
# test that extract_features works correctly
input = torch.zeros((100, 10))
output = classy_model.extract_features(input)
self.assertEqual(output.shape, (100, 20))
# test that get_classy_state and set_classy_state work
nn.init.constant_(classy_model.model.linear.weight, 1)
weights = copy.deepcopy(classy_model.model.linear.weight.data)
state_dict = classy_model.get_classy_state(deep_copy=True)
nn.init.constant_(classy_model.model.linear.weight, 0)
classy_model.set_classy_state(state_dict)
self.assertTrue(torch.allclose(weights, classy_model.model.linear.weight.data))
def test_classy_model_adapter_properties(self):
# test that the properties work correctly when passed to the adapter
model = TestModel()
input_shape = (10,)
model_depth = 1
classy_model = ClassyModel.from_model(
model, input_shape=input_shape, model_depth=model_depth
)
self.assertEqual(classy_model.input_shape, input_shape)
def test_train_step(self):
# test that the model can be run in a train step
model = models.resnet34(pretrained=False)
classy_model = ClassyModel.from_model(model)
config = get_fast_test_task_config()
task = build_task(config)
task.set_model(classy_model)
trainer = LocalTrainer()
trainer.train(task)
def test_heads(self):
model = models.resnet50(pretrained=False)
classy_model = ClassyModel.from_model(model)
num_classes = 5
head = FullyConnectedHead(
unique_id="default", in_plane=2048, num_classes=num_classes
)
classy_model.set_heads({"layer4": [head]})
input = torch.ones((1, 3, 224, 224))
self.assertEqual(classy_model(input).shape, (1, num_classes))
| ClassyVision-main | test/models_classy_model_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
import subprocess
import tempfile
import unittest
from pathlib import Path
import torch
from test.generic.config_utils import (
get_distributed_launch_cmd,
get_test_mlp_task_config,
)
class TestDistributedTrainer(unittest.TestCase):
def setUp(self):
config = get_test_mlp_task_config()
invalid_config = copy.deepcopy(config)
invalid_config["name"] = "invalid_task"
sync_bn_config = copy.deepcopy(config)
sync_bn_config["batch_norm_sync_mode"] = "pytorch"
self.config_files = {}
for config_key, config in [
("config", config),
("invalid_config", invalid_config),
("sync_bn_config", sync_bn_config),
]:
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
json.dump(config, f)
f.flush()
self.config_files[config_key] = f.name
self.path = Path(__file__).parent.absolute()
def tearDown(self):
for config_file in self.config_files.values():
os.unlink(config_file)
def test_training(self):
"""Checks we can train a small MLP model."""
num_processes = 2
for config_key, expected_success in [
("invalid_config", False),
("config", True),
]:
cmd = get_distributed_launch_cmd(
num_processes=num_processes,
trainer_path=f"{self.path}/../classy_train.py",
config_path=self.config_files[config_key],
)
result = subprocess.run(cmd, shell=True)
success = result.returncode == 0
self.assertEqual(success, expected_success)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_sync_batch_norm(self):
"""Test that sync batch norm training doesn't hang."""
num_processes = 2
cmd = get_distributed_launch_cmd(
num_processes=num_processes,
trainer_path=f"{self.path}/../classy_train.py",
config_path=self.config_files["sync_bn_config"],
)
result = subprocess.run(cmd, shell=True)
self.assertEqual(result.returncode, 0)
| ClassyVision-main | test/trainer_distributed_trainer_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.dataset.transforms.mixup import MixupTransform
class DatasetTransformsMixupTest(unittest.TestCase):
def test_mixup_transform_single_label_image_batch(self):
mixup_alpha = 2.0
num_classes = 3
for mode in ["batch", "pair", "elem"]:
mixup_transform = MixupTransform(mixup_alpha, num_classes, mode=mode)
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
sample_mixup = mixup_transform(sample)
self.assertTrue(sample["input"].shape == sample_mixup["input"].shape)
self.assertTrue(sample_mixup["target"].shape[0] == 4)
self.assertTrue(sample_mixup["target"].shape[1] == 3)
def test_cutmix_transform_single_label_image_batch(self):
mixup_alpha = 0
cutmix_alpha = 0.2
num_classes = 3
for mode in ["batch", "pair", "elem"]:
for minmax in [None, (0.3, 0.7)]:
cutmix_transform = MixupTransform(
mixup_alpha,
num_classes,
cutmix_alpha=cutmix_alpha,
mode=mode,
cutmix_minmax=minmax,
)
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
sample_cutmix = cutmix_transform(sample)
self.assertTrue(sample["input"].shape == sample_cutmix["input"].shape)
self.assertTrue(sample_cutmix["target"].shape[0] == 4)
self.assertTrue(sample_cutmix["target"].shape[1] == 3)
def test_mixup_cutmix_transform_single_label_image_batch(self):
mixup_alpha = 0.3
cutmix_alpha = 0.2
num_classes = 3
for mode in ["batch", "pair", "elem"]:
cutmix_transform = MixupTransform(
mixup_alpha,
num_classes,
cutmix_alpha=cutmix_alpha,
switch_prob=0.5,
mode=mode,
)
for _i in range(4):
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
sample_cutmix = cutmix_transform(sample)
self.assertTrue(sample["input"].shape == sample_cutmix["input"].shape)
self.assertTrue(sample_cutmix["target"].shape[0] == 4)
self.assertTrue(sample_cutmix["target"].shape[1] == 3)
def test_mixup_cutmix_transform_single_label_image_batch_label_smooth(self):
mixup_alpha = 0.3
cutmix_alpha = 0.2
num_classes = 3
for mode in ["batch", "pair", "elem"]:
cutmix_transform = MixupTransform(
mixup_alpha,
num_classes,
cutmix_alpha=cutmix_alpha,
switch_prob=0.5,
mode=mode,
label_smoothing=0.1,
)
for _i in range(4):
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
sample_cutmix = cutmix_transform(sample)
self.assertTrue(sample["input"].shape == sample_cutmix["input"].shape)
self.assertTrue(sample_cutmix["target"].shape[0] == 4)
self.assertTrue(sample_cutmix["target"].shape[1] == 3)
def test_mixup_transform_single_label_image_batch_missing_num_classes(self):
mixup_alpha = 2.0
mixup_transform = MixupTransform(mixup_alpha, None)
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
with self.assertRaises(Exception):
mixup_transform(sample)
def test_mixup_transform_multi_label_image_batch(self):
mixup_alpha = 2.0
mixup_transform = MixupTransform(mixup_alpha, None)
sample = {
"input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
"target": torch.as_tensor(
[[1, 0, 0, 0], [0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1]],
dtype=torch.int32,
),
}
sample_mixup = mixup_transform(sample)
self.assertTrue(sample["input"].shape == sample_mixup["input"].shape)
self.assertTrue(sample["target"].shape == sample_mixup["target"].shape)
def test_mixup_transform_single_label_multi_modal_batch(self):
mixup_alpha = 2.0
num_classes = 3
mixup_transform = MixupTransform(mixup_alpha, num_classes)
sample = {
"input": {
"video": torch.rand(4, 3, 4, 224, 224, dtype=torch.float32),
"audio": torch.rand(4, 1, 40, 100, dtype=torch.float32),
},
"target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
}
mixup_transform(sample)
def test_mixup_transform_multi_label_multi_modal_batch(self):
mixup_alpha = 2.0
mixup_transform = MixupTransform(mixup_alpha, None)
sample = {
"input": {
"video": torch.rand(4, 3, 4, 224, 224, dtype=torch.float32),
"audio": torch.rand(4, 1, 40, 100, dtype=torch.float32),
},
"target": torch.as_tensor(
[[1, 0, 0, 0], [0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1]],
dtype=torch.int32,
),
}
mixup_transform(sample)
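# --- Illustrative sketch, not part of the original tests ---------------------------
# The core mixup computation the transform above is expected to perform (cutmix,
# modes and label smoothing aside): draw lambda from Beta(alpha, alpha) and blend
# each sample and its one-hot target with a permuted batch partner. Targets are
# assumed to already be float one-hot vectors in this sketch.
def _mixup_batch(inputs, one_hot_targets, alpha=2.0):
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(inputs.size(0))
    mixed_inputs = lam * inputs + (1.0 - lam) * inputs[perm]
    mixed_targets = lam * one_hot_targets + (1.0 - lam) * one_hot_targets[perm]
    return mixed_inputs, mixed_targets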
| ClassyVision-main | test/dataset_transforms_mixup_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import build_loss, ClassyLoss, register_loss, SumArbitraryLoss
@register_loss("mock_a")
class MockLoss1(ClassyLoss):
def forward(self, pred, target):
return torch.tensor(1.0)
@classmethod
def from_config(cls, config):
return cls()
@register_loss("mock_b")
class MockLoss2(ClassyLoss):
def forward(self, pred, target):
return torch.tensor(2.0)
@classmethod
def from_config(cls, config):
return cls()
@register_loss("mock_c")
class MockLoss3(ClassyLoss):
def forward(self, pred, target):
return torch.tensor(3.0)
@classmethod
def from_config(cls, config):
return cls()
class TestSumArbitraryLoss(unittest.TestCase):
def _get_config(self):
return {
"name": "sum_arbitrary",
"weights": [1.0, 1.0, 1.0],
"losses": [{"name": "mock_a"}, {"name": "mock_b"}, {"name": "mock_c"}],
}
def _get_outputs(self):
return torch.tensor([[2.0, 8.0]])
def _get_targets(self):
return torch.tensor([1])
def test_build_sum_arbitrary(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, SumArbitraryLoss))
self.assertAlmostEqual(crit.weights, [1.0, 1.0, 1.0])
mod_list = [MockLoss1, MockLoss2, MockLoss3]
for idx, crit_type in enumerate(mod_list):
self.assertTrue(isinstance(crit.losses[idx], crit_type))
def test_sum_arbitrary(self):
config = self._get_config()
crit = SumArbitraryLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 1.0 + 2.0 + 3.0)
# Verify changing losses works
new_config = copy.deepcopy(config)
new_config.update(
{"losses": [{"name": "mock_a"}, {"name": "mock_b"}], "weights": [1.0, 1.0]}
)
crit = SumArbitraryLoss.from_config(new_config)
self.assertAlmostEqual(crit(outputs, targets).item(), 1.0 + 2.0)
# Verify changing weights works
new_config = copy.deepcopy(config)
new_config.update({"weights": [1.0, 2.0, 3.0]})
crit = SumArbitraryLoss.from_config(new_config)
self.assertAlmostEqual(
crit(outputs, targets).item(), 1.0 + 2.0 * 2.0 + 3.0 * 3.0
)
def test_deep_copy(self):
config = self._get_config()
crit1 = build_loss(config)
self.assertTrue(isinstance(crit1, SumArbitraryLoss))
outputs = self._get_outputs()
targets = self._get_targets()
crit1(outputs, targets)
crit2 = copy.deepcopy(crit1)
self.assertAlmostEqual(
crit1(outputs, targets).item(), crit2(outputs, targets).item()
)
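# --- Illustrative sketch, not part of the original tests ---------------------------
# The arithmetic asserted above: SumArbitraryLoss is expected to reduce to a
# weighted sum of its child losses, i.e. sum(w_i * loss_i(output, target)).
def _weighted_loss_sum(losses, weights, output, target):
    return sum(w * loss(output, target) for w, loss in zip(weights, losses))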
| ClassyVision-main | test/losses_sum_arbitrary_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from collections import defaultdict
import torch
from classy_vision.heads import build_head
from classy_vision.models import build_model, ClassyModel
from test.generic.config_utils import get_test_model_configs
from test.generic.utils import compare_model_state
class TestClassyModel(unittest.TestCase):
model_configs = get_test_model_configs()
def _get_config(self, model_config):
return {
"name": "classification_task",
"num_epochs": 12,
"loss": {"name": "test_loss"},
"dataset": {
"name": "imagenet",
"batchsize_per_replica": 8,
"use_pairs": False,
"num_samples_per_phase": None,
"use_shuffle": {"train": True, "test": False},
},
"meters": [],
"model": model_config,
"optimizer": {"name": "test_opt"},
}
def _compare_model_state(self, state, state2):
compare_model_state(self, state, state2)
def test_build_model(self):
for cfg in self.model_configs:
config = self._get_config(cfg)
model = build_model(config["model"])
self.assertTrue(isinstance(model, ClassyModel))
self.assertTrue(
type(model.input_shape) == tuple and len(model.input_shape) == 3
)
def test_get_set_state(self):
config = self._get_config(self.model_configs[0])
model = build_model(config["model"])
        fake_input = torch.randn(1, 3, 224, 224)
model.eval()
state = model.get_classy_state()
with torch.no_grad():
output = model(fake_input)
model2 = build_model(config["model"])
model2.set_classy_state(state)
# compare the states
state2 = model2.get_classy_state()
self._compare_model_state(state, state2)
model2.eval()
with torch.no_grad():
output2 = model2(fake_input)
self.assertTrue(torch.allclose(output, output2))
# test deep_copy by assigning a deep copied state to model2
# and then changing the original model's state
state = model.get_classy_state(deep_copy=True)
model3 = build_model(config["model"])
state3 = model3.get_classy_state()
# assign model2's state to model's and also re-assign model's state
model2.set_classy_state(state)
model.set_classy_state(state3)
# compare the states
state2 = model2.get_classy_state()
self._compare_model_state(state, state2)
def test_get_set_head_states(self):
config = copy.deepcopy(self._get_config(self.model_configs[0]))
head_configs = config["model"]["heads"]
config["model"]["heads"] = []
model = build_model(config["model"])
trunk_state = model.get_classy_state()
heads = defaultdict(list)
for head_config in head_configs:
head = build_head(head_config)
heads[head_config["fork_block"]].append(head)
model.set_heads(heads)
model_state = model.get_classy_state()
# the heads should be the same as we set
self.assertEqual(len(heads), len(model.get_heads()))
for block_name, hs in model.get_heads().items():
self.assertEqual(hs, heads[block_name])
model.clear_heads()
self._compare_model_state(model.get_classy_state(), trunk_state)
model.set_heads(heads)
self._compare_model_state(model.get_classy_state(), model_state)
| ClassyVision-main | test/manual/models_classy_vision_model_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.adamw_mt import AdamWMT
from test.generic.optim_test_util import TestOptimizer
class TestAdamWMTOptimizer(TestOptimizer, unittest.TestCase):
def _check_momentum_buffer(self):
return False
def _get_config(self):
return {
"name": "adamw_mt",
"num_epochs": 90,
"lr": 0.1,
"betas": (0.9, 0.99),
"eps": 1e-8,
"weight_decay": 0.0001,
"amsgrad": False,
}
def _instance_to_test(self):
return AdamWMT
| ClassyVision-main | test/manual/optim_adamw_mt_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import unittest.mock as mock
import progressbar
from classy_vision.hooks import ProgressBarHook
from test.generic.config_utils import get_test_classy_task
from test.generic.hook_test_utils import HookTestBase
class TestProgressBarHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {}
self.constructor_test_helper(
config=config, hook_type=ProgressBarHook, hook_registry_name="progress_bar"
)
@mock.patch("classy_vision.hooks.progress_bar_hook.progressbar")
@mock.patch("classy_vision.hooks.progress_bar_hook.is_primary")
def test_progress_bar(
self, mock_is_primary: mock.MagicMock, mock_progressbar_pkg: mock.MagicMock
) -> None:
"""
Tests that the progress bar is created, updated and destroyed correctly.
"""
mock_progress_bar = mock.create_autospec(progressbar.ProgressBar, instance=True)
mock_progressbar_pkg.ProgressBar.return_value = mock_progress_bar
mock_is_primary.return_value = True
task = get_test_classy_task()
task.prepare()
task.advance_phase()
num_batches = task.num_batches_per_phase
# make sure we are checking at least one batch
self.assertGreater(num_batches, 0)
# create a progress bar hook
progress_bar_hook = ProgressBarHook()
# progressbar.ProgressBar should be init-ed with num_batches
progress_bar_hook.on_phase_start(task)
mock_progressbar_pkg.ProgressBar.assert_called_once_with(num_batches)
mock_progress_bar.start.assert_called_once_with()
mock_progress_bar.start.reset_mock()
mock_progressbar_pkg.ProgressBar.reset_mock()
# on_step should update the progress bar correctly
for i in range(num_batches):
progress_bar_hook.on_step(task)
mock_progress_bar.update.assert_called_once_with(i + 1)
mock_progress_bar.update.reset_mock()
# check that even if on_step is called again, the progress bar is
# only updated with num_batches
for _ in range(num_batches):
progress_bar_hook.on_step(task)
mock_progress_bar.update.assert_called_once_with(num_batches)
mock_progress_bar.update.reset_mock()
# finish should be called on the progress bar
progress_bar_hook.on_phase_end(task)
mock_progress_bar.finish.assert_called_once_with()
mock_progress_bar.finish.reset_mock()
# check that even if the progress bar isn't created, the code doesn't
# crash
progress_bar_hook = ProgressBarHook()
try:
progress_bar_hook.on_step(task)
progress_bar_hook.on_phase_end(task)
except Exception as e:
self.fail(
"Received Exception when on_phase_start() isn't called: {}".format(e)
)
mock_progressbar_pkg.ProgressBar.assert_not_called()
# check that a progress bar is not created if is_primary() returns False
mock_is_primary.return_value = False
progress_bar_hook = ProgressBarHook()
try:
progress_bar_hook.on_phase_start(task)
progress_bar_hook.on_step(task)
progress_bar_hook.on_phase_end(task)
except Exception as e:
self.fail("Received Exception when is_primary() is False: {}".format(e))
self.assertIsNone(progress_bar_hook.progress_bar)
mock_progressbar_pkg.ProgressBar.assert_not_called()
| ClassyVision-main | test/manual/hooks_progress_bar_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.tasks import build_task, ClassificationTask
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
class TestClassificationTaskAMP(unittest.TestCase):
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_build_task(self):
config = get_test_task_config()
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
# check that AMP is disabled by default
self.assertIsNone(task.amp_args)
# test a valid APEX AMP opt level
config = copy.deepcopy(config)
config["amp_args"] = {"opt_level": "O1"}
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
# test a valid Pytorch AMP
config = copy.deepcopy(config)
config["amp_args"] = {"amp_type": "pytorch"}
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_training(self):
# Test an Apex AMP training
config = get_fast_test_task_config()
config["amp_args"] = {"opt_level": "O2"}
task = build_task(config)
task.set_use_gpu(True)
trainer = LocalTrainer()
trainer.train(task)
# Test a Pytorch AMP training
config["amp_args"] = {"amp_type": "pytorch"}
task = build_task(config)
task.set_use_gpu(True)
trainer = LocalTrainer()
trainer.train(task)
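# --- Illustrative sketch, not part of the original tests ---------------------------
# The standard PyTorch-native mixed-precision pattern that the
# {"amp_type": "pytorch"} config above is expected to enable inside the task's
# train step; the exact integration lives in ClassificationTask, not here.
def _amp_train_step(model, criterion, optimizer, scaler, inputs, targets):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = criterion(model(inputs), targets)
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    return loss.detach()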
| ClassyVision-main | test/manual/tasks_classification_task_amp_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.hooks import ModelComplexityHook
from classy_vision.models import build_model
from test.generic.config_utils import get_test_classy_task, get_test_model_configs
from test.generic.hook_test_utils import HookTestBase
class TestModelComplexityHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {}
self.constructor_test_helper(
config=config,
hook_type=ModelComplexityHook,
hook_registry_name="model_complexity",
)
def test_model_complexity_hook(self) -> None:
model_configs = get_test_model_configs()
task = get_test_classy_task()
task.prepare()
# create a model complexity hook
model_complexity_hook = ModelComplexityHook()
for model_config in model_configs:
model = build_model(model_config)
task.base_model = model
with self.assertLogs():
model_complexity_hook.on_start(task)
| ClassyVision-main | test/manual/hooks_model_complexity_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.adamw import AdamW
from test.generic.optim_test_util import TestOptimizer
class TestAdamWOptimizer(TestOptimizer, unittest.TestCase):
def _check_momentum_buffer(self):
return False
def _get_config(self):
return {
"name": "adamw",
"num_epochs": 90,
"lr": 0.1,
"betas": (0.9, 0.99),
"eps": 1e-8,
"weight_decay": 0.0001,
"amsgrad": False,
}
def _instance_to_test(self):
return AdamW
| ClassyVision-main | test/manual/optim_adamw_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import unittest.mock as mock
from itertools import product
from classy_vision.hooks import VisdomHook
from classy_vision.tasks import build_task
from test.generic.config_utils import get_test_task_config
from test.generic.hook_test_utils import HookTestBase
from visdom import Visdom
class TestVisdomHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {
"server": "test_server",
"port": "test_port",
"env": "test_env",
"title_suffix": "_test_suffix",
}
self.constructor_test_helper(
config=config, hook_type=VisdomHook, hook_registry_name="visdom"
)
@mock.patch("classy_vision.hooks.visdom_hook.is_primary")
@mock.patch("classy_vision.hooks.visdom_hook.Visdom", autospec=True)
def test_visdom(
self, mock_visdom_cls: mock.MagicMock, mock_is_primary: mock.MagicMock
) -> None:
"""
Tests that visdom is populated with plots.
"""
mock_visdom = mock.create_autospec(Visdom, instance=True)
mock_visdom_cls.return_value = mock_visdom
# set up the task and state
config = get_test_task_config()
config["dataset"]["train"]["batchsize_per_replica"] = 2
config["dataset"]["test"]["batchsize_per_replica"] = 5
task = build_task(config)
task.prepare()
losses = [1.2, 2.3, 1.23, 2.33]
loss_val = sum(losses) / len(losses)
task.losses = losses
visdom_server = "localhost"
visdom_port = 8097
for primary, visdom_conn in product([False, True], [False, True]):
mock_is_primary.return_value = primary
mock_visdom.check_connection.return_value = visdom_conn
# create a visdom hook
visdom_hook = VisdomHook(visdom_server, visdom_port)
mock_visdom_cls.assert_called_once()
mock_visdom_cls.reset_mock()
counts = {"train": 0, "test": 0}
count = 0
for phase_idx in range(10):
train = phase_idx % 2 == 0
task.train = train
phase_type = "train" if train else "test"
counts[phase_type] += 1
count += 1
# test that the metrics don't change if losses is empty and that
# visdom.line() is not called
task.losses = []
original_metrics = copy.deepcopy(visdom_hook.metrics)
visdom_hook.on_phase_end(task)
self.assertDictEqual(original_metrics, visdom_hook.metrics)
mock_visdom.line.assert_not_called()
# test that the metrics are updated correctly when losses
# is non empty
task.losses = [loss * count for loss in losses]
visdom_hook.on_phase_end(task)
# every meter should be present and should have the correct length
for meter in task.meters:
for key in meter.value:
key = phase_type + "_" + meter.name + "_" + key
self.assertTrue(
key in visdom_hook.metrics
and type(visdom_hook.metrics[key]) == list
and len(visdom_hook.metrics[key]) == counts[phase_type]
)
# the loss metric should be calculated correctly
loss_key = phase_type + "_loss"
self.assertTrue(
loss_key in visdom_hook.metrics
and type(visdom_hook.metrics[loss_key]) == list
and len(visdom_hook.metrics[loss_key]) == counts[phase_type]
)
self.assertAlmostEqual(
visdom_hook.metrics[loss_key][-1],
loss_val * count,
places=4,
)
# the lr metric should be correct
lr_key = phase_type + "_learning_rate"
self.assertTrue(
lr_key in visdom_hook.metrics
and type(visdom_hook.metrics[lr_key]) == list
and len(visdom_hook.metrics[lr_key]) == counts[phase_type]
)
self.assertAlmostEqual(
visdom_hook.metrics[lr_key][-1],
task.optimizer.options_view.lr,
places=4,
)
if primary and not train and visdom_conn:
# visdom.line() should be called once
mock_visdom.line.assert_called_once()
mock_visdom.line.reset_mock()
else:
# visdom.line() should not be called
mock_visdom.line.assert_not_called()
| ClassyVision-main | test/manual/hooks_visdom_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import shutil
import tempfile
import unittest.mock as mock
from itertools import product
from classy_vision.hooks import TensorboardPlotHook
from classy_vision.optim.param_scheduler import ClassyParamScheduler, UpdateInterval
from classy_vision.tasks import build_task
from classy_vision.tasks.classification_task import LastBatchInfo
from classy_vision.trainer import LocalTrainer
from test.generic.config_utils import get_test_mlp_task_config, get_test_task_config
from test.generic.hook_test_utils import HookTestBase
from torch.utils.tensorboard import SummaryWriter
class TestTensorboardPlotHook(HookTestBase):
def setUp(self) -> None:
self.base_dir = tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.base_dir)
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {"summary_writer": {}, "log_period": 5}
invalid_config = copy.deepcopy(config)
invalid_config["log_period"] = "this is not an int"
self.constructor_test_helper(
config=config,
hook_type=TensorboardPlotHook,
hook_registry_name="tensorboard_plot",
hook_kwargs={"tb_writer": SummaryWriter(), "log_period": 5},
invalid_configs=[invalid_config],
)
@mock.patch("classy_vision.hooks.tensorboard_plot_hook.is_primary")
def test_writer(self, mock_is_primary_func: mock.MagicMock) -> None:
"""
Tests that the tensorboard writer writes the correct scalars to SummaryWriter
iff is_primary() is True.
"""
for phase_idx, primary in product([0, 1, 2], [True, False]):
train, phase_type = (
(True, "train") if phase_idx % 2 == 0 else (False, "test")
)
mock_is_primary_func.return_value = primary
# set up the task and state
config = get_test_task_config()
config["dataset"]["train"]["batchsize_per_replica"] = 2
config["dataset"]["test"]["batchsize_per_replica"] = 5
task = build_task(config)
task.prepare()
task.advance_phase()
task.phase_idx = phase_idx
task.train = train
losses = [1.23, 4.45, 12.3, 3.4]
sample_fetch_times = [1.1, 2.2, 3.3, 2.2]
summary_writer = SummaryWriter(self.base_dir)
# create a spy on top of summary_writer
summary_writer = mock.MagicMock(wraps=summary_writer)
# create a loss lr tensorboard hook
tensorboard_plot_hook = TensorboardPlotHook(summary_writer)
# run the hook in the correct order
tensorboard_plot_hook.on_phase_start(task)
# test tasks which do not pass the sample_fetch_times as well
disable_sample_fetch_times = phase_idx == 0
for loss, sample_fetch_time in zip(losses, sample_fetch_times):
task.losses.append(loss)
step_data = (
{}
if disable_sample_fetch_times
else {"sample_fetch_time": sample_fetch_time}
)
task.last_batch = LastBatchInfo(None, None, None, None, step_data)
tensorboard_plot_hook.on_step(task)
tensorboard_plot_hook.on_phase_end(task)
if primary:
# add_scalar() should have been called with the right scalars
if train:
learning_rate_key = f"Learning Rate/{phase_type}"
summary_writer.add_scalar.assert_any_call(
learning_rate_key,
mock.ANY,
global_step=mock.ANY,
walltime=mock.ANY,
)
avg_loss_key = f"Losses/{phase_type}"
summary_writer.add_scalar.assert_any_call(
avg_loss_key, mock.ANY, global_step=mock.ANY
)
for meter in task.meters:
for name in meter.value:
meter_key = f"Meters/{phase_type}/{meter.name}/{name}"
summary_writer.add_scalar.assert_any_call(
meter_key, mock.ANY, global_step=mock.ANY
)
if step_data:
summary_writer.add_scalar.assert_any_call(
f"Speed/{phase_type}/cumulative_sample_fetch_time",
mock.ANY,
global_step=mock.ANY,
walltime=mock.ANY,
)
else:
# add_scalar() shouldn't be called since is_primary() is False
summary_writer.add_scalar.assert_not_called()
summary_writer.add_scalar.reset_mock()
def test_logged_lr(self):
# Mock LR scheduler
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
return where
mock_lr_scheduler = SchedulerMock(UpdateInterval.STEP)
# Mock Logging
class DummySummaryWriter(object):
def __init__(self):
self.scalar_logs = {}
def add_scalar(self, key, value, global_step=None, walltime=None) -> None:
self.scalar_logs[key] = self.scalar_logs.get(key, []) + [value]
def add_histogram(
self, key, value, global_step=None, walltime=None
) -> None:
return
def add_text(self, *args, **kwargs):
pass
def flush(self):
return
config = get_test_mlp_task_config()
config["num_epochs"] = 3
config["dataset"]["train"]["batchsize_per_replica"] = 10
config["dataset"]["test"]["batchsize_per_replica"] = 5
task = build_task(config)
writer = DummySummaryWriter()
hook = TensorboardPlotHook(writer)
hook.log_period = 1
task.set_hooks([hook])
task.set_optimizer_schedulers({"lr": mock_lr_scheduler})
trainer = LocalTrainer()
trainer.train(task)
        # We have 20 samples and a batch size of 10, so each epoch takes two steps;
        # over 3 epochs the STEP-interval mock scheduler is queried at where = k / 6
        # for k = 0..5, which is exactly the sequence asserted below.
self.assertEqual(
writer.scalar_logs["Learning Rate/train"],
[0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6],
)
| ClassyVision-main | test/manual/hooks_tensorboard_plot_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import unittest.mock as mock
from classy_vision.hooks import ModelTensorboardHook
from classy_vision.models import build_model
from test.generic.config_utils import get_test_classy_task, get_test_model_configs
from test.generic.hook_test_utils import HookTestBase
from torch.utils.tensorboard import SummaryWriter
class TestModelTensorboardHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {"summary_writer": {}}
self.constructor_test_helper(
config=config,
hook_type=ModelTensorboardHook,
hook_registry_name="model_tensorboard",
hook_kwargs={"tb_writer": SummaryWriter()},
)
@mock.patch("classy_vision.hooks.model_tensorboard_hook.is_primary")
def test_writer(self, mock_is_primary_func: mock.MagicMock) -> None:
"""
Tests that the tensorboard writer calls SummaryWriter with the model
iff is_primary() is True.
"""
mock_summary_writer = mock.create_autospec(SummaryWriter, instance=True)
task = get_test_classy_task()
task.prepare()
for primary in [False, True]:
mock_is_primary_func.return_value = primary
model_configs = get_test_model_configs()
for model_config in model_configs:
model = build_model(model_config)
task.base_model = model
# create a model tensorboard hook
model_tensorboard_hook = ModelTensorboardHook(mock_summary_writer)
model_tensorboard_hook.on_start(task)
if primary:
                    # on the primary, add_graph should be called exactly once with
                    # the model as the first argument
mock_summary_writer.add_graph.assert_called_once()
self.assertEqual(
mock_summary_writer.add_graph.call_args[0][0], model
)
else:
# add_graph shouldn't be called since is_primary() is False
mock_summary_writer.add_graph.assert_not_called()
mock_summary_writer.reset_mock()
| ClassyVision-main | test/manual/hooks_model_tensorboard_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ClassyVision-main | test/generic/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.hooks import build_hook
class HookTestBase(unittest.TestCase):
def constructor_test_helper(
self,
config,
hook_type,
hook_registry_name=None,
hook_kwargs=None,
invalid_configs=None,
):
hook_kwargs = config if hook_kwargs is None else hook_kwargs
hook1 = hook_type(**hook_kwargs)
self.assertTrue(isinstance(hook1, hook_type))
hook2 = hook_type.from_config(config)
self.assertTrue(isinstance(hook2, hook_type))
if hook_registry_name is not None:
config["name"] = hook_registry_name
hook3 = build_hook(config)
del config["name"]
self.assertTrue(isinstance(hook3, hook_type))
if invalid_configs is not None:
# Verify assert logic works correctly
for cfg in invalid_configs:
with self.assertRaises(
(AssertionError, TypeError, KeyError, ValueError)
):
hook_type.from_config(cfg)
| ClassyVision-main | test/generic/hook_test_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import queue
import tempfile
import unittest
import torch
UPDATE_SIGNAL = 0
VALUE_SIGNAL = 1
SHUTDOWN_SIGNAL = 2
TIMEOUT = 100
def _get_value_or_raise_error(qout, qerr):
try:
return qout.get(timeout=TIMEOUT)
except queue.Empty:
raise qerr.get(timeout=TIMEOUT)
def _run(qin, qout, qerr, func, *args):
try:
func(qin, qout, *args)
except Exception as e:
print(e)
qerr.put(e)
def _meter_worker(qin, qout, meter, is_train, world_size, rank, filename):
backend = "gloo"
torch.distributed.init_process_group(
backend=backend,
init_method="file://{filename}".format(filename=filename),
world_size=world_size,
rank=rank,
)
# Listen for commands on queues
while True:
try:
signal, val = qin.get()
except queue.Empty:
continue
if signal == UPDATE_SIGNAL:
meter.update(val[0], val[1], is_train=is_train)
elif signal == VALUE_SIGNAL:
meter.sync_state()
qout.put(meter.value)
elif signal == SHUTDOWN_SIGNAL:
break
else:
raise NotImplementedError("Bad signal value")
return
class ClassificationMeterTest(unittest.TestCase):
def setUp(self):
self.mp = multiprocessing.get_context("spawn")
self.processes = []
def tearDown(self):
for p in self.processes:
p.terminate()
def _spawn(self, func, *args):
name = "process #%d" % len(self.processes)
qin = self.mp.Queue()
qout = self.mp.Queue()
qerr = self.mp.Queue()
qio = (qin, qout, qerr)
args = qio + (func,) + args
process = self.mp.Process(target=_run, name=name, args=args, daemon=True)
process.start()
self.processes.append(process)
return qio
def _apply_updates_and_test_meter(
self, meter, model_output, target, expected_value, **kwargs
):
"""
Runs a valid meter test. Does not reset meter before / after running
"""
if not isinstance(model_output, list):
model_output = [model_output]
if not isinstance(target, list):
target = [target]
for i in range(len(model_output)):
meter.update(model_output[i], target[i], **kwargs)
meter.sync_state()
meter_value = meter.value
for key, val in expected_value.items():
self.assertTrue(
key in meter_value, msg="{0} not in meter value!".format(key)
)
if torch.is_tensor(meter_value[key]):
self.assertTrue(
torch.all(torch.eq(meter_value[key], val)),
msg="{0} meter value mismatch!".format(key),
)
else:
self.assertAlmostEqual(
meter_value[key],
val,
places=4,
msg="{0} meter value mismatch!".format(key),
)
def _values_match_expected_value(self, value0, value1, expected_value):
for key, val in expected_value.items():
self.assertTrue(key in value0, msg="{0} not in meter value!".format(key))
self.assertTrue(key in value1, msg="{0} not in meter value!".format(key))
if torch.is_tensor(val):
self.assertTrue(
torch.all(torch.eq(value0[key], val)),
"{0} meter value mismatch!".format(key),
)
self.assertTrue(
torch.all(torch.eq(value1[key], val)),
"{0} meter value mismatch!".format(key),
)
else:
self.assertAlmostEqual(
value0[key],
val,
places=4,
msg="{0} meter value mismatch!".format(key),
)
self.assertAlmostEqual(
value1[key],
val,
places=4,
msg="{0} meter value mismatch!".format(key),
)
def _validate_meter_inputs(self, meter, model_outputs, targets):
for i in range(len(model_outputs)):
meter.validate(model_outputs[i].size(), targets[i].size())
def meter_update_and_reset_test(
self, meter, model_outputs, targets, expected_value, **kwargs
):
"""
This test verifies that a single update on the meter is successful,
resets the meter, then applies the update again.
"""
# If a single output is provided, wrap in list
if not isinstance(model_outputs, list):
model_outputs = [model_outputs]
targets = [targets]
self._validate_meter_inputs(meter, model_outputs, targets)
self._apply_updates_and_test_meter(
meter, model_outputs, targets, expected_value, **kwargs
)
meter.reset()
# Verify reset works by reusing single update test
self._apply_updates_and_test_meter(
meter, model_outputs, targets, expected_value, **kwargs
)
def meter_invalid_meter_input_test(self, meter, model_output, target):
        # Invalid model output / target shapes should fail meter validation
with self.assertRaises(AssertionError):
meter.validate(model_output.shape, target.shape)
def meter_invalid_update_test(self, meter, model_output, target, **kwargs):
"""
        Applies updates that are expected to fail with an AssertionError.
        Does not reset meter before / after running
"""
if not isinstance(model_output, list):
model_output = [model_output]
if not isinstance(target, list):
target = [target]
with self.assertRaises(AssertionError):
for i in range(len(model_output)):
meter.update(model_output[i], target[i], **kwargs)
def meter_get_set_classy_state_test(
self, meters, model_outputs, targets, expected_value, **kwargs
):
"""
Tests get and set classy state methods of meter.
"""
assert len(meters) == 2, "Incorrect number of meters passed to test"
assert (
len(model_outputs) == 2
), "Incorrect number of model_outputs passed to test"
assert len(targets) == 2, "Incorrect number of targets passed to test"
meter0 = meters[0]
meter1 = meters[1]
meter0.update(model_outputs[0], targets[0], **kwargs)
meter1.update(model_outputs[1], targets[1], **kwargs)
meter0.sync_state()
value0 = meter0.value
meter1.sync_state()
value1 = meter1.value
for key, val in value0.items():
if torch.is_tensor(value1[key]):
self.assertFalse(
torch.all(torch.eq(value1[key], val)),
msg="{0} meter values should not be same!".format(key),
)
else:
self.assertNotEqual(
value1[key],
val,
msg="{0} meter values should not be same!".format(key),
)
meter0.set_classy_state(meter1.get_classy_state())
value0 = meter0.value
for key, val in value0.items():
if torch.is_tensor(value1[key]):
self.assertTrue(
torch.all(torch.eq(value1[key], val)),
msg="{0} meter value mismatch after state transfer!".format(key),
)
self.assertTrue(
torch.all(torch.eq(value1[key], expected_value[key])),
msg="{0} meter value mismatch from ground truth!".format(key),
)
else:
self.assertAlmostEqual(
value1[key],
val,
places=4,
msg="{0} meter value mismatch after state transfer!".format(key),
)
self.assertAlmostEqual(
value1[key],
expected_value[key],
places=4,
msg="{0} meter value mismatch from ground truth!".format(key),
)
def _spawn_all_meter_workers(self, world_size, meters, is_train):
filename = tempfile.NamedTemporaryFile(delete=True).name
qins = []
qerrs = []
qouts = []
for i in range(world_size):
qin, qout, qerr = self._spawn(
_meter_worker, meters[i], is_train, world_size, i, filename
)
qins.append(qin)
qouts.append(qout)
qerrs.append(qerr)
return qins, qouts, qerrs
def meter_distributed_test(
self, meters, model_outputs, targets, expected_values, is_train=False
):
"""
Sets up two processes each with a given meter on that process.
Verifies that sync code path works.
"""
world_size = len(meters)
assert world_size == 2, "This test only works for world_size of 2"
assert len(model_outputs) == 4, (
"Test assumes 4 model outputs, "
"0, 2 passed to meter0 and 1, 3 passed to meter1"
)
assert (
len(targets) == 4
), "Test assumes 4 targets, 0, 2 passed to meter0 and 1, 3 passed to meter1"
assert len(expected_values) == 2, (
"Test assumes 2 expected values, "
"first is result of applying updates 0,1 to the meter, "
"second is result of applying all 4 updates to meter"
)
qins, qouts, qerrs = self._spawn_all_meter_workers(
world_size, meters, is_train=is_train
)
# First update each meter, then get value from each meter
qins[0].put_nowait((UPDATE_SIGNAL, (model_outputs[0], targets[0])))
qins[1].put_nowait((UPDATE_SIGNAL, (model_outputs[1], targets[1])))
qins[0].put_nowait((VALUE_SIGNAL, None))
qins[1].put_nowait((VALUE_SIGNAL, None))
value0 = _get_value_or_raise_error(qouts[0], qerrs[0])
value1 = _get_value_or_raise_error(qouts[1], qerrs[1])
self._values_match_expected_value(value0, value1, expected_values[0])
# Verify that calling value again does not break things
qins[0].put_nowait((VALUE_SIGNAL, None))
qins[1].put_nowait((VALUE_SIGNAL, None))
value0 = _get_value_or_raise_error(qouts[0], qerrs[0])
value1 = _get_value_or_raise_error(qouts[1], qerrs[1])
self._values_match_expected_value(value0, value1, expected_values[0])
# Second, update each meter, then get value from each meter
qins[0].put_nowait((UPDATE_SIGNAL, (model_outputs[2], targets[2])))
qins[1].put_nowait((UPDATE_SIGNAL, (model_outputs[3], targets[3])))
qins[0].put_nowait((VALUE_SIGNAL, None))
qins[1].put_nowait((VALUE_SIGNAL, None))
value0 = _get_value_or_raise_error(qouts[0], qerrs[0])
value1 = _get_value_or_raise_error(qouts[1], qerrs[1])
self._values_match_expected_value(value0, value1, expected_values[1])
qins[0].put_nowait((SHUTDOWN_SIGNAL, None))
qins[1].put_nowait((SHUTDOWN_SIGNAL, None))
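# Hedged usage sketch (added for illustration, not part of the original file):
# a concrete meter test built on the helpers above typically looks roughly like
# the following. AccuracyMeter and the "top_1" value key are assumptions made
# only for this example.
#
#   meter = AccuracyMeter.from_config({"topk": [1]})
#   model_output = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
#   target = torch.tensor([0, 1])
#   self.meter_update_and_reset_test(
#       meter, model_output, target, expected_value={"top_1": 1.0}
#   )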
| ClassyVision-main | test/generic/meter_test_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class MergeDataset:
"""
Dataset that merges samples from multiple datasets into single sample.
If datasets have distinct keys, then we merge dicts, e.g.
dataset1[idx] = {'input': input_tensor}
dataset2[idx] = {'target': target_tensor}
merged_dataset[idx] = {'input': input_tensor, 'target': target_tensor}
If datasets have matching keys then we create a list and append, e.g.
dataset1[idx] = {'input': input_tensor1}
dataset2[idx] = {'input': input_tensor2}
merged_dataset[idx] = {'input': [input_tensor1, input_tensor2]}
    Note: if your datasets' samples do not have consistent keys for each sample,
    this could lead to inconsistent merged samples.
"""
def __init__(self, datasets):
# assertions:
assert isinstance(datasets, list)
assert all(len(dataset) == len(datasets[0]) for dataset in datasets)
# create object:
super(MergeDataset, self).__init__()
self.datasets = datasets
def __getitem__(self, idx):
final_sample = {}
for dataset in self.datasets:
curr_sample = dataset[idx]
assert isinstance(curr_sample, dict), "Merge dataset only supports dicts"
for key in curr_sample.keys():
                # If the key is new, store the value directly; otherwise collect
                # the values from the different datasets into a list
if key not in final_sample:
final_sample[key] = curr_sample[key]
elif not isinstance(final_sample[key], list):
final_sample[key] = [final_sample[key], curr_sample[key]]
else:
final_sample[key].append(curr_sample[key])
return final_sample
def __len__(self):
return len(self.datasets[0])
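# Hedged usage sketch (added for illustration, not part of the original file).
# _ListDataset below is a hypothetical helper used only to show how
# MergeDataset combines per-sample dicts with distinct keys.
if __name__ == "__main__":
    class _ListDataset:
        def __init__(self, key, values):
            self.key, self.values = key, values

        def __getitem__(self, idx):
            return {self.key: self.values[idx]}

        def __len__(self):
            return len(self.values)

    merged = MergeDataset(
        [_ListDataset("input", [1.0, 2.0]), _ListDataset("target", [0, 1])]
    )
    # Distinct keys from the two datasets are merged into one dict per sample.
    assert merged[0] == {"input": 1.0, "target": 0}
    assert len(merged) == 2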
| ClassyVision-main | test/generic/merge_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from functools import wraps
import torch
from classy_vision.hooks import ClassyHook
from classy_vision.tasks import ClassyTask
from classy_vision.trainer import ClassyTrainer
class Arguments(object):
"""Object that looks like input arguments. Used to spoof argparse namespace."""
def __init__(self, **args):
self.args = args
self.__dict__.update(args)
def __iter__(self):
return iter(self.args)
def __eq__(self, other):
if isinstance(other, Arguments):
return self.args == other.args
else:
return NotImplemented
def _asdict(self):
return vars(self)
def skip_if_no_gpu(func):
"""Decorator that can be used to skip GPU tests on non-GPU machines."""
func.skip_if_no_gpu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
return
if torch.cuda.device_count() <= 0:
return
return func(*args, **kwargs)
return wrapper
def repeat_test(original_function=None, *, num_times=3):
"""Decorator that can be used to repeat test multiple times."""
def repeat_test_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for _ in range(num_times):
func(*args, **kwargs)
return wrapper
# this handles default arguments to decorator:
if original_function:
return repeat_test_decorator(original_function)
return repeat_test_decorator
def make_torch_deterministic(seed=0):
"""Makes Torch code run deterministically."""
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
def compare_batches(test_fixture, batch1, batch2):
"""Compare two batches. Does not do recursive comparison"""
test_fixture.assertEqual(type(batch1), type(batch2))
if isinstance(batch1, (tuple, list)):
test_fixture.assertEqual(len(batch1), len(batch2))
for n in range(len(batch1)):
value1 = batch1[n]
value2 = batch2[n]
test_fixture.assertEqual(type(value1), type(value2))
if torch.is_tensor(value1):
test_fixture.assertTrue(torch.allclose(value1, value2))
else:
test_fixture.assertEqual(value1, value2)
elif isinstance(batch1, dict):
test_fixture.assertEqual(batch1.keys(), batch2.keys())
for key, value1 in batch1.items():
value2 = batch2[key]
test_fixture.assertEqual(type(value1), type(value2))
if torch.is_tensor(value1):
test_fixture.assertTrue(torch.allclose(value1, value2))
else:
test_fixture.assertEqual(value1, value2)
def compare_datasets(test_fixture, dataset1, dataset2):
test_fixture.assertEqual(len(dataset1), len(dataset2))
for idx in range(len(dataset1)):
compare_batches(test_fixture, dataset1[idx], dataset2[idx])
def compare_batchlist_and_dataset_with_skips(
test_fixture, batch_list, dataset, skip_indices=None
):
"""
Compares a list of batches and the dataset. If some samples were
skipped in the iterator (i.e. if we simulated an error on that
sample), that should be indicated in the skip_indices list
"""
if skip_indices is None:
skip_indices = []
if isinstance(skip_indices, int):
skip_indices = [skip_indices]
skips = 0
for idx, batch in enumerate(batch_list):
while (idx + skips) in skip_indices:
skips += 1
dataset_batch = dataset[idx + skips]
compare_batches(test_fixture, batch, dataset_batch)
class MockErrorDataset:
"""
Dataset used for testing. Wraps a real dataset with a
batchsize_per_replica, but allows us to delete samples on return
to simulate errors (similar to what happens with Everstore)
"""
def __init__(self, dataset):
self.rebatch_map = {}
self.dataset = dataset
self.batchsize_per_replica = dataset.batchsize_per_replica
def __getitem__(self, idx):
batch = self.dataset[idx]
# If rebatch map contains index, resize the batch
if idx in self.rebatch_map:
num_samples = self.rebatch_map[idx]
if num_samples < batch["input"].size()[0]:
batch["input"] = batch["input"][:num_samples]
batch["target"] = batch["target"][:num_samples]
return batch
def __len__(self):
return len(self.dataset)
def recursive_unpack(batch):
"""
Takes a batch of samples, e.g.
batch = {'input': tensor([256, 3, 224, 224]), 'target': tensor([256])}
and unpacks them into a list of single samples, e.g.
[{'input': tensor([1, 3, 224, 224]), 'target': tensor([1])} ... ]
"""
new_list = []
if isinstance(batch, dict):
unpacked_dict = {}
batchsize_per_replica = -1
for key, val in batch.items():
unpacked_dict[key] = recursive_unpack(val)
batchsize_per_replica = (
len(unpacked_dict[key])
if not torch.is_tensor(unpacked_dict[key])
else 1
)
for idx in range(batchsize_per_replica):
sample = {}
for key, val in unpacked_dict.items():
sample[key] = val[idx]
new_list.append(sample)
return new_list
elif isinstance(batch, (list, tuple)):
unpacked_list = []
if isinstance(batch, tuple):
batch = list(batch)
for val in batch:
unpacked_list.append(recursive_unpack(val))
batchsize_per_replica = (
len(unpacked_list[0]) if not torch.is_tensor(unpacked_list[0]) else 1
)
for idx in range(batchsize_per_replica):
sample = []
for val in unpacked_list:
sample.append(val[idx])
if isinstance(batch, tuple):
sample = tuple(sample)
new_list.append(sample)
return new_list
elif torch.is_tensor(batch):
for i in range(batch.size()[0]):
new_list.append(batch[i])
return new_list
raise TypeError("Unexpected type %s passed to unpack" % type(batch))
def compare_model_state(test_fixture, state, state2, check_heads=True):
for k in state["model"]["trunk"].keys():
if not torch.allclose(state["model"]["trunk"][k], state2["model"]["trunk"][k]):
print(k, state["model"]["trunk"][k], state2["model"]["trunk"][k])
test_fixture.assertTrue(
torch.allclose(state["model"]["trunk"][k], state2["model"]["trunk"][k])
)
if check_heads:
for block, head_states in state["model"]["heads"].items():
for head_id, states in head_states.items():
for k in states.keys():
test_fixture.assertTrue(
torch.allclose(
state["model"]["heads"][block][head_id][k],
state2["model"]["heads"][block][head_id][k],
)
)
def compare_samples(test_fixture, sample1, sample2):
test_fixture.assertEqual(sample1.keys(), sample2.keys())
test_fixture.assertTrue(torch.is_tensor(sample1["input"]))
test_fixture.assertTrue(torch.is_tensor(sample2["input"]))
test_fixture.assertTrue(torch.is_tensor(sample1["target"]))
test_fixture.assertTrue(torch.is_tensor(sample2["target"]))
test_fixture.assertTrue(torch.allclose(sample1["input"], sample2["input"]))
test_fixture.assertTrue(torch.allclose(sample1["target"], sample2["target"]))
def compare_states(test_fixture, state_1, state_2, check_heads=True):
"""
Tests the classy state dicts for equality, but skips the member objects
which implement their own {get, set}_classy_state functions.
"""
# check base_model
compare_model_state(
test_fixture, state_1["base_model"], state_2["base_model"], check_heads
)
# check losses
test_fixture.assertEqual(len(state_1["losses"]), len(state_2["losses"]))
for loss_1, loss_2 in zip(state_1["losses"], state_2["losses"]):
test_fixture.assertAlmostEqual(loss_1, loss_2)
for key in ["base_model", "meters", "optimizer", "losses"]:
# we trust that these have been tested using their unit tests or
# by the code above
test_fixture.assertIn(key, state_1)
test_fixture.assertIn(key, state_2)
del state_1[key]
del state_2[key]
test_fixture.assertDictEqual(state_1, state_2)
class LimitedPhaseException(Exception):
pass
class LimitedPhaseHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, num_phases: int):
self.num_phases = num_phases
self.phase_counter = 0
def on_phase_end(self, task):
if self.phase_counter >= self.num_phases:
raise LimitedPhaseException
self.phase_counter += 1
class LimitedPhaseTrainer(ClassyTrainer):
def __init__(self, num_phases: int):
self.num_phases = num_phases
def train(self, task: ClassyTask):
task.hooks = task.hooks + [LimitedPhaseHook(self.num_phases)]
try:
super().train(task)
except LimitedPhaseException:
pass
class ClassyTestCase(unittest.TestCase):
def assertTorchAllClose(
self, tensor_1, tensor_2, rtol=1e-5, atol=1e-8, equal_nan=False
):
for tensor in [tensor_1, tensor_2]:
if not isinstance(tensor, torch.Tensor):
raise AssertionError(
f"Expected tensor, not {tensor} of type {type(tensor)}"
)
if not torch.allclose(
tensor_1, tensor_2, rtol=rtol, atol=atol, equal_nan=equal_nan
):
raise AssertionError(f"{tensor_1} is not close to {tensor_2}")
| ClassyVision-main | test/generic/utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from unittest.mock import MagicMock
import torch
import torch.nn as nn
from classy_vision.generic.util import split_batchnorm_params
from classy_vision.models import ClassyModel
from classy_vision.optim import build_optimizer, build_optimizer_schedulers
from classy_vision.optim.param_scheduler import LinearParamScheduler
class TestOptimizer(ABC):
@abstractmethod
def _get_config(self):
pass
@abstractmethod
def _instance_to_test(self):
pass
def _check_momentum_buffer(self):
return False
def _parameters(self, requires_grad=True):
return [
torch.tensor([[1.0, 2.0]], requires_grad=requires_grad),
torch.tensor([[3.0, 4.0]], requires_grad=requires_grad),
]
def _set_gradient(self, params, grad_values=None):
if grad_values is None:
grad_values = [0.1, 0.1]
for i in range(len(params)):
params[i].grad = torch.tensor([grad_values])
def _compare_momentum_values(self, optim1, optim2):
self.assertEqual(len(optim1["param_groups"]), len(optim2["param_groups"]))
for i in range(len(optim1["param_groups"])):
self.assertEqual(
len(optim1["param_groups"][i]["params"]),
len(optim2["param_groups"][i]["params"]),
)
if self._check_momentum_buffer():
for j in range(len(optim1["param_groups"][i]["params"])):
id1 = optim1["param_groups"][i]["params"][j]
id2 = optim2["param_groups"][i]["params"][j]
self.assertTrue(
torch.allclose(
optim1["state"][id1]["momentum_buffer"],
optim2["state"][id2]["momentum_buffer"],
)
)
def _get_set_state(self, grad_values):
config = self._get_config()
opt1 = build_optimizer(config)
opt1.set_param_groups(self._parameters(), lr=1, momentum=0.9)
self.assertIsInstance(opt1, self._instance_to_test())
self._set_gradient(self._parameters(), grad_values)
opt1.step(where=0)
if config["name"] == "zero":
opt1.consolidate_state_dict()
state = opt1.get_classy_state()
opt2 = build_optimizer(config)
opt2.set_param_groups(self._parameters(), lr=2)
self.assertNotEqual(opt1.options_view.lr, opt2.options_view.lr)
opt2.set_classy_state(state)
self.assertEqual(opt1.options_view.lr, opt2.options_view.lr)
for i in range(len(opt1.optimizer.param_groups[0]["params"])):
self.assertTrue(
torch.allclose(
opt1.optimizer.param_groups[0]["params"][i],
opt2.optimizer.param_groups[0]["params"][i],
)
)
if config["name"] == "zero":
opt2.consolidate_state_dict()
self._compare_momentum_values(
opt1.get_classy_state()["optim"], opt2.get_classy_state()["optim"]
)
# check if the optimizers behave the same on params update
mock_classy_vision_model1 = self._parameters()
mock_classy_vision_model2 = self._parameters()
self._set_gradient(mock_classy_vision_model1, grad_values)
self._set_gradient(mock_classy_vision_model2, grad_values)
opt1 = build_optimizer(config)
opt1.set_param_groups(mock_classy_vision_model1)
opt2 = build_optimizer(config)
opt2.set_param_groups(mock_classy_vision_model2)
opt1.step(where=0)
opt2.step(where=0)
for i in range(len(opt1.optimizer.param_groups[0]["params"])):
print(opt1.optimizer.param_groups[0]["params"][i])
self.assertTrue(
torch.allclose(
opt1.optimizer.param_groups[0]["params"][i],
opt2.optimizer.param_groups[0]["params"][i],
)
)
if config["name"] == "zero":
opt1.consolidate_state_dict()
opt2.consolidate_state_dict()
self._compare_momentum_values(
opt1.get_classy_state()["optim"], opt2.get_classy_state()["optim"]
)
def test_build_sgd(self):
config = self._get_config()
opt = build_optimizer(config)
opt.set_param_groups(self._parameters())
self.assertTrue(isinstance(opt, self._instance_to_test()))
def test_get_set_state(self):
for grad_values in [[0.1, 0.1], [-0.1, -0.1], [0.0, 0.0], [0.1, -0.1]]:
self._get_set_state(grad_values)
def test_set_invalid_state(self):
config = self._get_config()
opt = build_optimizer(config)
opt.set_param_groups(self._parameters())
self.assertTrue(isinstance(opt, self._instance_to_test()))
with self.assertRaises(KeyError):
opt.set_classy_state({})
def test_lr_schedule(self):
config = self._get_config()
opt = build_optimizer(config)
param_schedulers = build_optimizer_schedulers(config)
opt.set_param_groups({"params": self._parameters(), **param_schedulers})
# Test initial learning rate
for group in opt.optimizer.param_groups:
self.assertEqual(group["lr"], 0.1)
def _test_lr_schedule(optimizer, num_epochs, epochs, targets):
for i in range(len(epochs)):
epoch = epochs[i]
target = targets[i]
param_groups = optimizer.optimizer.param_groups.copy()
optimizer.on_epoch(epoch / num_epochs)
for idx, group in enumerate(optimizer.optimizer.param_groups):
self.assertEqual(group["lr"], target)
# Make sure all but LR is same
param_groups[idx]["lr"] = target
self.assertEqual(param_groups[idx], group)
# Test constant learning schedule
num_epochs = 90
epochs = [0, 0.025, 0.05, 0.1, 0.5, 1, 15, 29, 30, 31, 59, 60, 61, 88, 89]
targets = [0.1] * 15
_test_lr_schedule(opt, num_epochs, epochs, targets)
# Test step learning schedule
config["param_schedulers"] = {
"lr": {"name": "step", "values": [0.1, 0.01, 0.001]}
}
opt = build_optimizer(config)
param_schedulers = build_optimizer_schedulers(config)
opt.set_param_groups({"params": self._parameters(), **param_schedulers})
targets = [0.1] * 8 + [0.01] * 3 + [0.001] * 4
_test_lr_schedule(opt, num_epochs, epochs, targets)
# Test step learning schedule with warmup
init_lr = 0.01
warmup_epochs = 0.1
config["param_schedulers"] = {
"lr": {
"name": "composite",
"schedulers": [
{"name": "linear", "start_value": init_lr, "end_value": 0.1},
{"name": "step", "values": [0.1, 0.01, 0.001]},
],
"update_interval": "epoch",
"interval_scaling": ["rescaled", "fixed"],
"lengths": [warmup_epochs / num_epochs, 1 - warmup_epochs / num_epochs],
}
}
opt = build_optimizer(config)
param_schedulers = build_optimizer_schedulers(config)
opt.set_param_groups({"params": self._parameters(), **param_schedulers})
targets = [0.01, 0.0325, 0.055] + [0.1] * 5 + [0.01] * 3 + [0.001] * 4
_test_lr_schedule(opt, num_epochs, epochs, targets)
def test_set_param_groups(self):
opt = build_optimizer(self._get_config())
# This must crash since we're missing the .set_param_groups call
with self.assertRaises(RuntimeError):
opt.step(where=0)
def test_step_args(self):
opt = build_optimizer(self._get_config())
opt.set_param_groups([torch.tensor([1.0], requires_grad=True)])
# where argument must be named explicitly
with self.assertRaises(RuntimeError):
opt.step(0)
# this shouldn't crash
opt.step(where=0)
def test_get_lr(self):
opt = build_optimizer(self._get_config())
param = torch.tensor([1.0], requires_grad=True)
opt.set_param_groups([{"params": [param], "lr": 1}])
self.assertEqual(opt.options_view.lr, 1)
# Case two: verify LR changes
opt = build_optimizer(self._get_config())
param = torch.tensor([1.0], requires_grad=True)
opt.set_param_groups([{"params": [param], "lr": LinearParamScheduler(1, 2)}])
self.assertAlmostEqual(opt.options_view.lr, 1)
opt.step(where=0.5)
self.assertAlmostEqual(opt.options_view.lr, 1.5)
def test_batchnorm_weight_decay(self):
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(2, 3)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(3)
def forward(self, x):
return self.bn(self.relu(self.lin(x)))
torch.manual_seed(1)
model = MyModel()
opt = build_optimizer(self._get_config())
bn_params, lin_params = split_batchnorm_params(model)
lin_param_before = model.lin.weight.detach().clone()
bn_param_before = model.bn.weight.detach().clone()
with torch.enable_grad():
x = torch.tensor([[1.0, 1.0], [1.0, 2.0]])
out = model(x).pow(2).sum()
out.backward()
opt.set_param_groups(
[
{
"params": lin_params,
"lr": LinearParamScheduler(1, 2),
"weight_decay": 0.5,
},
{"params": bn_params, "lr": 0, "weight_decay": 0},
]
)
opt.step(where=0.5)
# Make sure the linear parameters are trained but not the batch norm
self.assertFalse(torch.allclose(model.lin.weight, lin_param_before))
self.assertTrue(torch.allclose(model.bn.weight, bn_param_before))
opt.step(where=0.5)
# Same, but after another step and triggering the lr scheduler
self.assertFalse(torch.allclose(model.lin.weight, lin_param_before))
self.assertTrue(torch.allclose(model.bn.weight, bn_param_before))
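# Hedged usage sketch (added for illustration, not part of the original file):
# a concrete optimizer test is typically assembled from this mixin roughly as
# follows. The SGD import and the exact config keys are assumptions based on
# how _get_config is consumed above.
#
#   import unittest
#   from classy_vision.optim import SGD
#
#   class TestSGDOptimizer(TestOptimizer, unittest.TestCase):
#       def _get_config(self):
#           return {"name": "sgd", "lr": 0.1, "momentum": 0.9, "weight_decay": 1e-4}
#
#       def _instance_to_test(self):
#           return SGD
#
#       def _check_momentum_buffer(self):
#           return True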
| ClassyVision-main | test/generic/optim_test_util.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from classy_vision.tasks import build_task
def get_test_task_config(head_num_classes=1000):
return {
"name": "classification_task",
"num_epochs": 12,
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_image",
"crop_size": 224,
"class_ratio": 0.5,
"num_samples": 2000,
"seed": 0,
"batchsize_per_replica": 32,
"num_workers": 1,
"use_shuffle": True,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
"test": {
"name": "synthetic_image",
"crop_size": 224,
"class_ratio": 0.5,
"num_samples": 2000,
"seed": 0,
"batchsize_per_replica": 32,
"num_workers": 1,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
},
"meters": {"accuracy": {"topk": [1, 5]}},
"model": {
"name": "resnet",
"num_blocks": [3, 4, 6, 3],
"small_input": False,
"zero_init_bn_residuals": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": head_num_classes,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
"optimizer": {
"name": "sgd",
"num_epochs": 12,
"param_schedulers": {"lr": {"name": "step", "values": [0.1, 0.01]}},
"weight_decay": 1e-4,
"momentum": 0.9,
},
}
def get_fast_test_task_config(head_num_classes=1000):
return {
"name": "classification_task",
"num_epochs": 1,
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_image",
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 2,
"num_workers": 1,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
"test": {
"name": "synthetic_image",
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 2,
"num_workers": 1,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
},
"model": {
"name": "resnet",
"num_blocks": [1],
"small_input": False,
"zero_init_bn_residuals": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": head_num_classes,
"fork_block": "block0-0",
"in_plane": 256,
}
],
},
"meters": {"accuracy": {"topk": [1]}},
"optimizer": {"name": "sgd", "lr": 0.01, "weight_decay": 1e-4, "momentum": 0.9},
}
def get_test_classy_task():
config = get_test_task_config()
task = build_task(config)
return task
def get_test_mlp_task_config():
return {
"name": "classification_task",
"num_epochs": 10,
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_image",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 20,
"seed": 0,
"batchsize_per_replica": 6,
"use_augmentation": False,
"use_shuffle": True,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
"test": {
"name": "synthetic_image",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 1,
"use_augmentation": False,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
},
"model": {
"name": "mlp",
# 3x20x20 = 1200
"input_dim": 1200,
"output_dim": 1000,
"hidden_dims": [10],
"use_batchnorm": True, # used for testing sync batchnorm
},
"meters": {"accuracy": {"topk": [1]}},
"optimizer": {
"name": "sgd",
"num_epochs": 10,
"lr": 0.1,
"weight_decay": 1e-4,
"momentum": 0.9,
},
}
def get_test_model_configs():
return [
# resnet 50
{
"name": "resnet",
"num_blocks": [3, 4, 6, 3],
"small_input": False,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
# resnet 101
{
"name": "resnet",
"num_blocks": [3, 4, 23, 3],
"small_input": False,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
# resnext 101 32x4d
{
"name": "resnext",
"num_blocks": [3, 4, 23, 3],
"base_width_and_cardinality": [4, 32],
"small_input": False,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "block3-2",
"in_plane": 2048,
}
],
},
]
def get_test_video_task_config():
return {
"name": "classification_task",
"num_epochs": 27,
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_video",
"split": "train",
"batchsize_per_replica": 8,
"use_shuffle": True,
"num_samples": 128,
"frames_per_clip": 8,
"video_height": 128,
"video_width": 160,
"num_classes": 50,
"clips_per_video": 1,
},
"test": {
"name": "synthetic_video",
"split": "test",
"batchsize_per_replica": 10,
"use_shuffle": False,
"num_samples": 40,
"frames_per_clip": 8,
"video_height": 128,
"video_width": 160,
"num_classes": 50,
"clips_per_video": 10,
},
},
"meters": {"accuracy": {"topk": [1, 5]}},
"model": {
"name": "resnext3d",
"frames_per_clip": 8,
"input_planes": 3,
"clip_crop_size": 224,
"skip_transformation_type": "postactivated_shortcut",
"residual_transformation_type": "postactivated_bottleneck_transformation",
"num_blocks": [3, 4, 6, 3],
"input_key": "video",
"stem_name": "resnext3d_stem",
"stem_planes": 64,
"stem_temporal_kernel": 5,
"stem_spatial_kernel": 7,
"stem_maxpool": True,
"stage_planes": 64,
"stage_temporal_kernel_basis": [[3], [3, 1], [3, 1], [1, 3]],
"temporal_conv_1x1": [True, True, True, True],
"stage_temporal_stride": [1, 1, 1, 1],
"stage_spatial_stride": [1, 2, 2, 2],
"num_groups": 1,
"width_per_group": 64,
"num_classes": 50,
"heads": [
{
"name": "fully_convolutional_linear",
"unique_id": "default_head",
"pool_size": [8, 7, 7],
"activation_func": "softmax",
"num_classes": 50,
"fork_block": "pathway0-stage4-block2",
"in_plane": 512,
"use_dropout": True,
}
],
},
"optimizer": {
"name": "sgd",
"param_schedulers": {
"lr": {
"name": "multistep",
"num_epochs": 10,
"values": [0.1, 0.01, 0.001, 0.0001],
"milestones": [3, 7, 9],
}
},
"weight_decay": 0.0001,
"momentum": 0.9,
},
}
def get_test_classy_video_task():
config = get_test_video_task_config()
task = build_task(config)
return task
def get_distributed_launch_cmd(num_processes: int, trainer_path: str, config_path: str):
return f"""{sys.executable} -m torch.distributed.launch \
--nnodes=1 \
--nproc_per_node={num_processes} \
--master_addr=localhost \
--master_port=29500 \
--use_env \
{trainer_path} \
--config={config_path} \
--log_freq=100 \
--distributed_backend=ddp
"""
| ClassyVision-main | test/generic/config_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ClassyVision-main | hydra_plugins/classy_vision_path/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_search_path import ConfigSearchPath
from hydra.plugins.search_path_plugin import SearchPathPlugin
class ClassyVisionPathPlugin(SearchPathPlugin):
def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
search_path.append(
provider="classy_vision", path="pkg://classy_vision.hydra.conf"
)
| ClassyVision-main | hydra_plugins/classy_vision_path/classy_vision_path.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "Classy Vision"
copyright = "2019, Facebook AI Research"
author = "Facebook AI Research"
# The full version, including alpha/beta/rc tags
release = "0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
]
autodoc_default_flags = ["undoc-members"]
autodoc_default_options = {
"special-members": "__init__,__call__",
"autodoc_typehints": "none",
}
primary_domain = "py"
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
| ClassyVision-main | sphinx/conf.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, ScriptExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
""" # noqa: E501
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.loads(infile.read())
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
for tid in tutorial_ids:
print("Generating {} tutorial".format(tid))
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", "{}.ipynb".format(tid))
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# displayname is absent from notebook metadata
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter()
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("body", {"class": "jp-Notebook"})
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
repo_dir, "website", "_tutorials", "{}.html".format(tid)
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", "{}.js".format(tid)
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.ipynb".format(tid)
)
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = ScriptExporter()
script, meta = exporter.from_notebook_node(nb)
py_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.py".format(tid)
)
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w", "--repo_dir", metavar="path", required=True, help="captum repo directory."
)
args = parser.parse_args()
gen_tutorials(args.repo_dir)
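# Example invocation (a hedged sketch; the path below is a placeholder):
#   python scripts/parse_tutorials.py --repo_dir /path/to/ClassyVision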
| ClassyVision-main | scripts/parse_tutorials.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from bs4 import BeautifulSoup
js_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./"
src="/js/documentation_options.js"></script>
<script type="text/javascript" src="/js/jquery.js"></script>
<script type="text/javascript" src="/js/underscore.js"></script>
<script type="text/javascript" src="/js/doctools.js"></script>
<script type="text/javascript" src="/js/language_data.js"></script>
<script type="text/javascript" src="/js/searchtools.js"></script>
""" # noqa: E501
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/js/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
# add js
if fname == "search.html":
out = js_scripts + search_js_scripts + str(wrapped_doc)
else:
out = js_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
# update reference in JS file
with open(os.path.join(input_dir, "_static/searchtools.js"), "r") as js_file:
js = js_file.read()
js = js.replace(
"DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'"
)
with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file:
js_file.write(js)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in Docusaurus.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
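# Example invocation (a hedged sketch; both paths below are placeholders):
#   python scripts/parse_sphinx.py -i sphinx/_build/html -o website/pages/api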
| ClassyVision-main | scripts/parse_sphinx.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.7.0"
| ClassyVision-main | classy_vision/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from . import build_loss, ClassyLoss, register_loss
@register_loss("sum_arbitrary")
class SumArbitraryLoss(ClassyLoss):
"""
Sums a collection of (weighted) torch.nn losses.
NOTE: this applies all the losses to the same output and does not support
taking a list of outputs as input.
"""
def __init__(
self, losses: List[ClassyLoss], weights: Optional[Tensor] = None
) -> None:
super().__init__()
if weights is None:
weights = torch.ones((len(losses)))
self.losses = losses
self.weights = weights
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SumArbitraryLoss":
"""Instantiates a SumArbitraryLoss from a configuration.
Args:
config: A configuration for a SumArbitraryLoss.
See :func:`__init__` for parameters expected in the config.
Returns:
A SumArbitraryLoss instance.
"""
assert (
type(config["losses"]) == list and len(config["losses"]) > 0
), "losses must be a list of registered losses with length > 0"
assert type(config["weights"]) == list and len(config["weights"]) == len(
config["losses"]
), "weights must be None or a list and have same length as losses"
loss_modules = []
for loss_config in config["losses"]:
loss_modules.append(build_loss(loss_config))
assert all(
isinstance(loss_module, ClassyLoss) for loss_module in loss_modules
), "All losses must be registered, valid ClassyLosses"
return cls(losses=loss_modules, weights=config.get("weights", None))
    def forward(self, prediction, target):
        total_loss = None
        for idx, loss in enumerate(self.losses):
            current_loss = loss(prediction, target)
            if total_loss is None:
                total_loss = self.weights[idx] * current_loss
            else:
                total_loss = total_loss + self.weights[idx] * current_loss
        return total_loss
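# Hedged usage sketch (added for illustration, not part of the original file);
# runnable as part of the classy_vision package, e.g. via
# `python -m classy_vision.losses.sum_arbitrary_loss`.
if __name__ == "__main__":
    config = {
        "name": "sum_arbitrary",
        "losses": [
            {"name": "soft_target_cross_entropy"},
            {"name": "label_smoothing_cross_entropy", "smoothing_param": 0.1},
        ],
        "weights": [1.0, 0.5],
    }
    loss = build_loss(config)
    output = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    print(loss(output, target))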
| ClassyVision-main | classy_vision/losses/sum_arbitrary_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from pathlib import Path
import torch
import torch.nn.modules.loss as torch_losses
from classy_vision.generic.registry_utils import import_all_modules
from classy_vision.generic.util import log_class_usage
from .classy_loss import ClassyLoss
FILE_ROOT = Path(__file__).parent
LOSS_REGISTRY = {}
LOSS_REGISTRY_TB = {}
LOSS_CLASS_NAMES = set()
LOSS_CLASS_NAMES_TB = {}
def build_loss(config):
"""Builds a ClassyLoss from a config.
This assumes a 'name' key in the config which is used to determine what
model class to instantiate. For instance, a config `{"name": "my_loss",
"foo": "bar"}` will find a class that was registered as "my_loss"
(see :func:`register_loss`) and call .from_config on it.
In addition to losses registered with :func:`register_loss`, we also
support instantiating losses available in the `torch.nn.modules.loss <https:
//pytorch.org/docs/stable/nn.html#loss-functions>`_
module. Any keys in the config will get expanded to parameters of the loss
constructor. For instance, the following call will instantiate a
`torch.nn.modules.CrossEntropyLoss <https://pytorch.org/docs/stable/
nn.html#torch.nn.CrossEntropyLoss>`_:
.. code-block:: python
build_loss({"name": "CrossEntropyLoss", "reduction": "sum"})
"""
assert "name" in config, f"name not provided for loss: {config}"
name = config["name"]
args = copy.deepcopy(config)
del args["name"]
if "weight" in args and args["weight"] is not None:
# if we are passing weights, we need to change the weights from a list
# to a tensor
args["weight"] = torch.tensor(args["weight"], dtype=torch.float)
if name in LOSS_REGISTRY:
loss = LOSS_REGISTRY[name].from_config(config)
else:
# the name should be available in torch.nn.modules.loss
assert hasattr(torch_losses, name), (
f"{name} isn't a registered loss"
", nor is it available in torch.nn.modules.loss"
)
loss = getattr(torch_losses, name)(**args)
log_class_usage("Loss", loss.__class__)
return loss
def register_loss(name, bypass_checks=False):
"""Registers a ClassyLoss subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyLoss from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyLoss subclass, like this:
.. code-block:: python
@register_loss("my_loss")
class MyLoss(ClassyLoss):
...
To instantiate a loss from a configuration file, see
:func:`build_loss`."""
def register_loss_cls(cls):
if not bypass_checks:
if name in LOSS_REGISTRY:
msg = (
"Cannot register duplicate loss ({}). Already registered at \n{}\n"
)
raise ValueError(msg.format(name, LOSS_REGISTRY_TB[name]))
if not issubclass(cls, ClassyLoss):
raise ValueError(
"Loss ({}: {}) must extend ClassyLoss".format(name, cls.__name__)
)
tb = "".join(traceback.format_stack())
LOSS_REGISTRY[name] = cls
LOSS_CLASS_NAMES.add(cls.__name__)
LOSS_REGISTRY_TB[name] = tb
LOSS_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_loss_cls
# automatically import any Python files in the losses/ directory
import_all_modules(FILE_ROOT, "classy_vision.losses")
from .barron_loss import BarronLoss # isort:skip
from .label_smoothing_loss import LabelSmoothingCrossEntropyLoss # isort:skip
from .multi_output_sum_loss import MultiOutputSumLoss # isort:skip
from .soft_target_cross_entropy_loss import SoftTargetCrossEntropyLoss # isort:skip
from .sum_arbitrary_loss import SumArbitraryLoss # isort:skip
__all__ = [
"BarronLoss",
"ClassyLoss",
"LabelSmoothingCrossEntropyLoss",
"MultiOutputSumLoss",
"SoftTargetCrossEntropyLoss",
"SumArbitraryLoss",
"build_loss",
"register_loss",
]
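# Hedged end-to-end sketch (added for illustration, not part of the original
# file): registering a custom ClassyLoss and instantiating it through
# build_loss. The "my_scaled_l1" name and _ScaledL1Loss class are hypothetical
# and exist only for this example.
if __name__ == "__main__":
    @register_loss("my_scaled_l1")
    class _ScaledL1Loss(ClassyLoss):
        def __init__(self, scale):
            super().__init__()
            self.scale = scale

        @classmethod
        def from_config(cls, config):
            return cls(scale=config.get("scale", 1.0))

        def forward(self, output, target):
            return self.scale * (output - target).abs().mean()

    loss = build_loss({"name": "my_scaled_l1", "scale": 2.0})
    print(loss(torch.zeros(2, 3), torch.ones(2, 3)))  # tensor(2.)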
| ClassyVision-main | classy_vision/losses/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
from . import build_loss, ClassyLoss, register_loss
@register_loss("multi_output_sum_loss")
class MultiOutputSumLoss(ClassyLoss):
"""
Applies the provided loss to the list of outputs (or single output) and sums
up the losses.
"""
def __init__(self, loss) -> None:
super().__init__()
self._loss = loss
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "MultiOutputSumLoss":
"""Instantiates a MultiOutputSumLoss from a configuration.
Args:
            config: A configuration for a MultiOutputSumLoss.
See :func:`__init__` for parameters expected in the config.
Returns:
A MultiOutputSumLoss instance.
"""
assert (
type(config["loss"]) == dict
), "loss must be a dict containing a configuration for a registered loss"
return cls(loss=build_loss(config["loss"]))
def forward(self, output, target):
if torch.is_tensor(output):
output = [output]
loss = 0
for pred in output:
loss += self._loss(pred, target)
return loss
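# Hedged usage sketch (added for illustration, not part of the original file):
# summing one registered loss over the outputs of two heads.
if __name__ == "__main__":
    loss = build_loss(
        {
            "name": "multi_output_sum_loss",
            "loss": {"name": "soft_target_cross_entropy"},
        }
    )
    outputs = [torch.randn(4, 10), torch.randn(4, 10)]
    target = torch.randint(0, 10, (4,))
    print(loss(outputs, target))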
| ClassyVision-main | classy_vision/losses/multi_output_sum_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch.nn as nn
class ClassyLoss(nn.Module):
"""
Base class to calculate the loss during training.
This implementation of :class:`torch.nn.Module` allows building
the loss object from a configuration file.
"""
def __init__(self):
"""
Constructor for ClassyLoss.
"""
super(ClassyLoss, self).__init__()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyLoss":
"""Instantiates a ClassyLoss from a configuration.
Args:
config: A configuration for a ClassyLoss.
Returns:
A ClassyLoss instance.
"""
raise NotImplementedError()
def forward(self, output, target):
"""
Compute the loss for the provided sample.
Refer to :class:`torch.nn.Module` for more details.
"""
raise NotImplementedError
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyLoss.
The returned state is used for checkpointing. Note that most losses are
stateless and do not need to save any state.
Returns:
A state dictionary containing the state of the loss.
"""
return self.state_dict()
def set_classy_state(self, state: Dict[str, Any]) -> None:
"""Set the state of the ClassyLoss.
Args:
            state: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the loss from a checkpoint. Note
that most losses are stateless and do not need to load any state.
"""
return self.load_state_dict(state)
def has_learned_parameters(self) -> bool:
"""Does this loss have learned parameters?"""
return any(param.requires_grad for param in self.parameters(recurse=True))
| ClassyVision-main | classy_vision/losses/classy_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
from . import ClassyLoss, register_loss
@register_loss("barron")
class BarronLoss(ClassyLoss):
"""
This implements the `Barron loss <https://arxiv.org/pdf/1701.03077.pdf>`_.
"""
def __init__(self, alpha, size_average, c):
super(BarronLoss, self).__init__()
self.size_average = size_average
self.alpha = alpha
self.c = c
self.z = max(1.0, 2.0 - self.alpha)
# define all three losses:
def _forward_zero(diff):
out = diff.div(self.c).pow(2.0).mul(0.5).add(1.0).log()
return out
def _forward_inf(diff):
out = 1.0 - diff.div(self.c).pow(2.0).mul(-0.5).exp()
return out
def _forward(diff):
out = diff.div(self.c).pow(2.0).div(self.z).add(1.0).pow(self.alpha / 2.0)
out.add_(-1.0).mul_(self.z / self.alpha)
return out
# set the correct loss:
if self.alpha == 0.0:
self._forward = _forward_zero
elif self.alpha == -float("inf") or self.alpha == float("inf"):
self._forward = _forward_inf
else:
self._forward = _forward
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "BarronLoss":
"""Instantiates a BarronLoss from a configuration.
Args:
config: A configuration for a BarronLoss.
See :func:`__init__` for parameters expected in the config.
Returns:
A BarronLoss instance.
"""
# Infinity is a valid alpha value but is frequently a string
config["alpha"] = float(config["alpha"])
# assertions:
assert type(config["size_average"]) == bool
assert type(config["alpha"]) == float
assert type(config["c"]) == float and config["c"] > 0.0
return cls(
alpha=config["alpha"], size_average=config["size_average"], c=config["c"]
)
def forward(self, prediction, target):
diff = torch.add(prediction, -target)
loss = self._forward(diff)
loss = loss.sum(0, keepdim=True)
if self.size_average:
loss.div_(prediction.size(0))
return loss
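# Hedged usage sketch (added for illustration, not part of the original file):
# alpha roughly interpolates between known losses (alpha=2.0 gives a scaled L2,
# alpha=0.0 the Cauchy/log form, alpha=+/-inf the Welsch form).
if __name__ == "__main__":
    loss = BarronLoss.from_config({"alpha": 1.0, "size_average": True, "c": 1.0})
    prediction = torch.tensor([[0.5], [1.5]])
    target = torch.tensor([[0.0], [1.0]])
    print(loss(prediction, target))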
| ClassyVision-main | classy_vision/losses/barron_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import torch.nn.functional as F
from classy_vision.generic.util import convert_to_one_hot
from classy_vision.losses import ClassyLoss, register_loss
@register_loss("soft_target_cross_entropy")
class SoftTargetCrossEntropyLoss(ClassyLoss):
def __init__(self, ignore_index=-100, reduction="mean", normalize_targets=True):
"""Intializer for the soft target cross-entropy loss loss.
This allows the targets for the cross entropy loss to be multilabel
Args:
ignore_index: sample should be ignored for loss if the class is this value
reduction: specifies reduction to apply to the output
normalize_targets: whether the targets should be normalized to a sum of 1
based on the total count of positive targets for a given sample
"""
super(SoftTargetCrossEntropyLoss, self).__init__()
self._ignore_index = ignore_index
self._reduction = reduction
assert isinstance(normalize_targets, bool)
self._normalize_targets = normalize_targets
if self._reduction not in ["none", "mean"]:
raise NotImplementedError(
'reduction type "{}" not implemented'.format(self._reduction)
)
self._eps = torch.finfo(torch.float32).eps
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SoftTargetCrossEntropyLoss":
"""Instantiates a SoftTargetCrossEntropyLoss from a configuration.
Args:
config: A configuration for a SoftTargetCrossEntropyLoss.
See :func:`__init__` for parameters expected in the config.
Returns:
A SoftTargetCrossEntropyLoss instance.
"""
return cls(
ignore_index=config.get("ignore_index", -100),
reduction=config.get("reduction", "mean"),
normalize_targets=config.get("normalize_targets", True),
)
def forward(self, output, target):
"""for N examples and C classes
- output: N x C these are raw outputs (without softmax/sigmoid)
- target: N x C or N corresponding targets
Target elements set to ignore_index contribute 0 loss.
Samples where all entries are ignore_index do not contribute to the loss
reduction.
"""
# check if targets are inputted as class integers
if target.ndim == 1:
assert (
output.shape[0] == target.shape[0]
), "SoftTargetCrossEntropyLoss requires output and target to have same batch size"
target = convert_to_one_hot(target.view(-1, 1), output.shape[1])
assert output.shape == target.shape, (
"SoftTargetCrossEntropyLoss requires output and target to be same "
f"shape: {output.shape} != {target.shape}"
)
valid_mask = target != self._ignore_index
valid_targets = target.float() * valid_mask.float()
if self._normalize_targets:
valid_targets /= self._eps + valid_targets.sum(dim=1, keepdim=True)
per_sample_per_target_loss = -valid_targets * F.log_softmax(output, -1)
per_sample_loss = torch.sum(per_sample_per_target_loss, -1)
# perform reduction
if self._reduction == "mean":
# normalize based on the number of samples with > 0 non-ignored targets
loss = per_sample_loss.sum() / torch.sum(
(torch.sum(valid_mask, -1) > 0)
).clamp(min=1)
elif self._reduction == "none":
loss = per_sample_loss
return loss
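# Hedged usage sketch (added for illustration, not part of the original file):
# soft (probability) targets and integer class targets are both accepted.
if __name__ == "__main__":
    loss = SoftTargetCrossEntropyLoss()
    output = torch.tensor([[2.0, 1.0, 0.0]])
    print(loss(output, torch.tensor([[0.7, 0.3, 0.0]])))  # soft targets
    print(loss(output, torch.tensor([0])))  # index targets, converted to one-hot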
| ClassyVision-main | classy_vision/losses/soft_target_cross_entropy_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import numpy as np
from classy_vision.generic.util import convert_to_one_hot
from classy_vision.losses import ClassyLoss, register_loss
from classy_vision.losses.soft_target_cross_entropy_loss import (
SoftTargetCrossEntropyLoss,
)
@register_loss("label_smoothing_cross_entropy")
class LabelSmoothingCrossEntropyLoss(ClassyLoss):
def __init__(self, ignore_index=-100, reduction="mean", smoothing_param=None):
"""Intializer for the label smoothed cross entropy loss.
This decreases gap between output scores and encourages generalization.
Targets provided to forward can be one-hot vectors (NxC) or class indices (Nx1).
This normalizes the targets to a sum of 1 based on the total count of positive
targets for a given sample before applying label smoothing.
Args:
ignore_index: sample should be ignored for loss if the class is this value
reduction: specifies reduction to apply to the output
smoothing_param: value to be added to each target entry
"""
super().__init__()
self._ignore_index = ignore_index
self._reduction = reduction
self._smoothing_param = smoothing_param
self.loss_function = SoftTargetCrossEntropyLoss(
self._ignore_index, self._reduction, normalize_targets=False
)
self._eps = np.finfo(np.float32).eps
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "LabelSmoothingCrossEntropyLoss":
"""Instantiates a LabelSmoothingCrossEntropyLoss from a configuration.
Args:
config: A configuration for a LabelSmoothingCrossEntropyLoss.
See :func:`__init__` for parameters expected in the config.
Returns:
A LabelSmoothingCrossEntropyLoss instance.
"""
assert (
"smoothing_param" in config
), "Label Smoothing needs a smoothing parameter"
return cls(
ignore_index=config.get("ignore_index", -100),
reduction=config.get("reduction", "mean"),
smoothing_param=config.get("smoothing_param"),
)
def compute_valid_targets(self, target, classes):
"""
This function takes one-hot or index target vectors and computes valid one-hot
target vectors, based on ignore index value
"""
target_shape_list = list(target.size())
valid_mask = target != self._ignore_index
valid_targets = target.float() * valid_mask.float()
# check if targets are inputted as class integers
if len(target_shape_list) == 1 or (
len(target_shape_list) == 2 and target_shape_list[1] == 1
):
valid_targets = convert_to_one_hot(valid_targets.view(-1, 1), classes)
valid_targets = valid_targets.float() * valid_mask.view(-1, 1).float()
return valid_targets
def smooth_targets(self, valid_targets, classes):
"""
This function takes valid (No ignore values present) one-hot target vectors
and computes smoothed target vectors (normalized) according to the loss's
smoothing parameter
"""
valid_targets /= self._eps + valid_targets.sum(dim=1, keepdim=True)
if classes > 0:
smoothed_targets = valid_targets + (self._smoothing_param / classes)
smoothed_targets /= self._eps + smoothed_targets.sum(dim=1, keepdim=True)
return smoothed_targets
def forward(self, output, target):
valid_targets = self.compute_valid_targets(
target=target, classes=output.shape[1]
)
assert (
valid_targets.shape == output.shape
), "LabelSmoothingCrossEntropyLoss requires output and target to be same size"
smoothed_targets = self.smooth_targets(
valid_targets=valid_targets, classes=output.shape[1]
)
return self.loss_function(output, smoothed_targets)
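# Hedged usage sketch (added for illustration, not part of the original file):
# integer class targets are converted to one-hot and smoothed before the
# cross-entropy is applied.
if __name__ == "__main__":
    import torch

    loss = LabelSmoothingCrossEntropyLoss(smoothing_param=0.1)
    output = torch.randn(4, 5)
    target = torch.tensor([0, 1, 2, 3])
    print(loss(output, target))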
| ClassyVision-main | classy_vision/losses/label_smoothing_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from classy_vision.generic.util import (
load_and_broadcast_checkpoint,
update_classy_model,
)
from classy_vision.tasks import ClassificationTask, register_task
@register_task("fine_tuning")
class FineTuningTask(ClassificationTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pretrained_checkpoint_dict = None
self.pretrained_checkpoint_path = None
self.pretrained_checkpoint_load_strict = True
self.reset_heads = False
self.freeze_trunk = False
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "FineTuningTask":
"""Instantiates a FineTuningTask from a configuration.
Args:
config: A configuration for a FineTuningTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A FineTuningTask instance.
"""
task = super().from_config(config)
pretrained_checkpoint_path = config.get("pretrained_checkpoint")
if pretrained_checkpoint_path:
task.set_pretrained_checkpoint(pretrained_checkpoint_path)
task.set_pretrained_checkpoint_load_strict(
config.get("pretrained_checkpoint_load_strict", True)
)
task.set_reset_heads(config.get("reset_heads", False))
task.set_freeze_trunk(config.get("freeze_trunk", False))
return task
def set_pretrained_checkpoint(self, checkpoint_path: str) -> "FineTuningTask":
self.pretrained_checkpoint_path = checkpoint_path
return self
def set_pretrained_checkpoint_load_strict(
self, pretrained_checkpoint_load_strict: bool
):
self.pretrained_checkpoint_load_strict = pretrained_checkpoint_load_strict
return self
def _set_pretrained_checkpoint_dict(
self, checkpoint_dict: Dict[str, Any]
) -> "FineTuningTask":
self.pretrained_checkpoint_dict = checkpoint_dict
return self
def set_reset_heads(self, reset_heads: bool) -> "FineTuningTask":
self.reset_heads = reset_heads
return self
def set_freeze_trunk(self, freeze_trunk: bool) -> "FineTuningTask":
self.freeze_trunk = freeze_trunk
return self
def _set_model_train_mode(self):
phase = self.phases[self.phase_idx]
self.loss.train(phase["train"])
if self.freeze_trunk:
# convert all the sub-modules to the eval mode, except the heads
self.base_model.eval()
for heads in self.base_model.get_heads().values():
for h in heads:
h.train(phase["train"])
else:
self.base_model.train(phase["train"])
def prepare(self) -> None:
super().prepare()
if self.checkpoint_dict is None:
# no checkpoint exists, load the model's state from the pretrained
# checkpoint
if self.pretrained_checkpoint_path:
self.pretrained_checkpoint_dict = load_and_broadcast_checkpoint(
self.pretrained_checkpoint_path
)
assert (
self.pretrained_checkpoint_dict is not None
), "Need a pretrained checkpoint for fine tuning"
state_load_success = update_classy_model(
self.base_model,
self.pretrained_checkpoint_dict["classy_state_dict"]["base_model"],
self.reset_heads,
self.pretrained_checkpoint_load_strict,
)
assert (
state_load_success
), "Update classy state from pretrained checkpoint was unsuccessful."
if self.freeze_trunk:
# do not track gradients for all the parameters in the model except
# for the parameters in the heads
for param in self.base_model.parameters():
param.requires_grad = False
for heads in self.base_model.get_heads().values():
for h in heads:
for param in h.parameters():
param.requires_grad = True
# re-create ddp model
self.distributed_model = None
self.init_distributed_data_parallel_model()
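# Hedged configuration sketch (added for illustration, not part of the original
# file): the fine-tuning specific keys consumed by from_config above, layered
# on top of a regular classification_task config (dataset, model, loss and
# optimizer sections are omitted here).
#
#   {
#       "name": "fine_tuning",
#       "pretrained_checkpoint": "/path/to/pretrained/checkpoint",
#       "pretrained_checkpoint_load_strict": True,
#       "reset_heads": True,
#       "freeze_trunk": True,
#       ...
#   }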
| ClassyVision-main | classy_vision/tasks/fine_tuning_task.py |