File size: 3,491 Bytes
a01ef8c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#

import pytest

from tlt.utils.inc_utils import get_inc_config


@pytest.mark.common
@pytest.mark.parametrize('accuracy_criterion,valid',
                         [[0.1, True],
                          [-1, False],
                          [0.01, True],
                          [1.434, False],
                          ['foo', False]])
def test_inc_config_accuracy_criterion(accuracy_criterion, valid):
    """
    Verify that get_inc_config accepts valid accuracy_criterion_relative
    values and raises ValueError for invalid ones.
    """
    if valid:
        # A good value should be stored on the returned config unchanged
        config = get_inc_config(accuracy_criterion_relative=accuracy_criterion)
        assert config.accuracy_criterion.relative == accuracy_criterion
    else:
        # A bad value should be rejected during config construction
        with pytest.raises(ValueError):
            get_inc_config(accuracy_criterion_relative=accuracy_criterion)


@pytest.mark.common
@pytest.mark.parametrize('timeout,valid',
                         [[0.1, False],
                          [-1, False],
                          [0, True],
                          [60, True],
                          ['foo', False]])
def test_inc_config_timeout(timeout, valid):
    """
    Verify that get_inc_config accepts valid exit_policy_timeout values
    and raises ValueError for invalid ones.
    """
    if valid:
        # A good timeout should round-trip onto the returned config
        config = get_inc_config(exit_policy_timeout=timeout)
        assert config.timeout == timeout
    else:
        # Non-integer or negative timeouts should be rejected
        with pytest.raises(ValueError):
            get_inc_config(exit_policy_timeout=timeout)


@pytest.mark.common
@pytest.mark.parametrize('max_trials,valid',
                         [[0.1, False],
                          [-1, False],
                          [0, False],
                          [1, True],
                          [60, True],
                          ['foo', False]])
def test_inc_config_max_trials(max_trials, valid):
    """
    Verify that get_inc_config accepts valid exit_policy_max_trials values
    and raises ValueError for invalid ones.
    """
    if valid:
        # A good trial count should round-trip onto the returned config
        config = get_inc_config(exit_policy_max_trials=max_trials)
        assert config.max_trials == max_trials
    else:
        # Non-positive or non-integer trial counts should be rejected
        with pytest.raises(ValueError):
            get_inc_config(exit_policy_max_trials=max_trials)


@pytest.mark.common
@pytest.mark.parametrize('approach,valid',
                         [['foo', False],
                          [-1, False],
                          [0, False],
                          ['static', True],
                          ['dynamic', True],
                          [True, False],
                          [False, False]])
def test_inc_config_approach(approach, valid):
    """
    Verify that get_inc_config accepts the valid approach strings
    ('static', 'dynamic') and raises ValueError for anything else.
    """
    if valid:
        # The approach string is expanded into the full quantization mode name
        config = get_inc_config(approach=approach)
        assert config.approach == 'post_training_{}_quant'.format(approach)
    else:
        # Unknown strings and non-string values should be rejected
        with pytest.raises(ValueError):
            get_inc_config(approach=approach)