import os
from pathlib import Path

import numpy as np
import pytest
import torch

from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules

# TODO: waiting for the bug with calling register_all_modules()
# multiple times to be fixed
register_all_modules()


@pytest.mark.parametrize('config,devices',
                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
                           ('cpu', 'cuda'))])
def test_init_detector(config, devices):
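    """Check that init_detector builds a model from a str or Path config,
    applies cfg_options overrides, and raises TypeError for an unsupported
    config type."""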
    assert all([device in ['cpu', 'cuda'] for device in devices])

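    # resolve the config path relative to the repository root so the test
    # works regardless of the current working directory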
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')

    config_file = os.path.join(project_dir, config)

    # test init_detector with config_file: str and cfg_options
    cfg_options = dict(
        model=dict(
            backbone=dict(
                depth=18,
                init_cfg=dict(
                    type='Pretrained', checkpoint='torchvision://resnet18'))))

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')

        model = init_detector(
            config_file, device=device, cfg_options=cfg_options)

        # test init_detector with :obj:`Path`
        config_path_object = Path(config_file)
        model = init_detector(config_path_object, device=device)

        # test init_detector with undesirable type
        with pytest.raises(TypeError):
            config_list = [config_file]
            model = init_detector(config_list)  # noqa: F841


@pytest.mark.parametrize('config,devices',
                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
                           ('cpu', 'cuda'))])
def test_inference_detector(config, devices):
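    """Check that inference_detector returns a single DetDataSample for one
    image and a list of results for a list of images."""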
    assert all([device in ['cpu', 'cuda'] for device in devices])

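    # resolve the config path relative to the repository root so the test
    # works regardless of the current working directory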
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')

    config_file = os.path.join(project_dir, config)

    # prepare two small random images to run inference on
    rng = np.random.RandomState(0)
    img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')

        model = init_detector(config_file, device=device)
        result = inference_detector(model, img1)
        assert isinstance(result, DetDataSample)
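        # a list of input images should yield one result per image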
        result = inference_detector(model, [img1, img2])
        assert isinstance(result, list) and len(result) == 2