"""
Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import pytest
from accuracy_checker.metrics import MetricsExecutor
from accuracy_checker.representation import RegressionPrediction, RegressionAnnotation
from accuracy_checker.presenters import EvaluationResult


class TestRegressionMetric:
    def setup_method(self):
        self.module = 'accuracy_checker.metrics.metric_evaluator'

    def test_mae_with_zero_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3)]
        predictions = [RegressionPrediction('identifier', 3)]
        config = [{'type': 'mae'}]
        expected = EvaluationResult(
            pytest.approx([0.0, 0.0]),
            None,
            'mae',
            'mae',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_with_negative_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3), RegressionAnnotation('identifier2', 1)]
        predictions = [RegressionPrediction('identifier', 5), RegressionPrediction('identifier2', 5)]
        config = [{'type': 'mae'}]
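        # absolute errors are |3 - 5| = 2 and |1 - 5| = 4: mean 3.0, population std 1.0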
        expected = EvaluationResult(
            pytest.approx([3.0, 1.0]),
            None,
            'mae',
            'mae',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_with_positive_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3), RegressionAnnotation('identifier2', 1)]
        predictions = [RegressionPrediction('identifier', 1), RegressionPrediction('identifier2', -3)]
        config = [{'type': 'mae'}]
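        # absolute errors are |3 - 1| = 2 and |1 - (-3)| = 4: same mean 3.0 and std 1.0 as the negative-diff case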
        expected = EvaluationResult(
            pytest.approx([3.0, 1.0]),
            None,
            'mae',
            'mae',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mse_with_zero_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3)]
        predictions = [RegressionPrediction('identifier', 3)]
        config = [{'type': 'mse'}]
        expected = EvaluationResult(
            pytest.approx([0.0, 0.0]),
            None,
            'mse',
            'mse',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mse_with_negative_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3), RegressionAnnotation('identifier2', 1)]
        predictions = [RegressionPrediction('identifier', 5), RegressionPrediction('identifier2', 5)]
        config = [{'type': 'mse'}]
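        # squared errors are (3 - 5)**2 = 4 and (1 - 5)**2 = 16: mean 10.0, std 6.0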
        expected = EvaluationResult(
            pytest.approx([10.0, 6.0]),
            None,
            'mse',
            'mse',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mse_with_positive_diff_between_annotation_and_prediction(self):
        annotations = [RegressionAnnotation('identifier', 3), RegressionAnnotation('identifier2', 1)]
        predictions = [RegressionPrediction('identifier', 1), RegressionPrediction('identifier2', -3)]
        config = [{'type': 'mse'}]
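        # squared errors are (3 - 1)**2 = 4 and (1 - (-3))**2 = 16: mean 10.0, std 6.0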
        expected = EvaluationResult(
            pytest.approx([10.0, 6.0]),
            None,
            'mse',
            'mse',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean', 'std'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_missed_interval(self):
        config = [{'type': 'mae_on_interval'}]
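        # neither 'end' nor 'intervals' is provided, so metric configuration is expected to be rejected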
        with pytest.raises(ValueError):
            MetricsExecutor(config, None)

    def test_mae_on_interval_default_all_missed(self):
        annotations = [RegressionAnnotation('identifier', -2)]
        predictions = [RegressionPrediction('identifier', 1)]
        config = [{'type': 'mae_on_interval', 'end': 1}]
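        # the annotation value -2 lies outside the default [0, 1) interval and is ignored,
        # so no per-interval statistics are reported and a warning is expected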
        expected = EvaluationResult(
            pytest.approx([0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {'postfix': ' ', 'scale': 1, 'names': [], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        with pytest.warns(UserWarning) as warnings:
            for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
                assert len(warnings) == 1
                assert evaluation_result == expected

    def test_mae_on_interval_default_all_not_in_range_not_ignore_out_of_range(self):
        annotations = [RegressionAnnotation('identifier', -1), RegressionAnnotation('identifier', 2)]
        predictions = [RegressionPrediction('identifier', 1), RegressionPrediction('identifier', 2)]
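        # with ignore_values_not_in_interval disabled, out-of-range values fall into
        # dedicated '< start' and '> end' buckets: error |-1 - 1| = 2 below and |2 - 2| = 0 above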
        config = [{'type': 'mae_on_interval', 'end': 1, 'ignore_values_not_in_interval': False}]
        expected = EvaluationResult(
            pytest.approx([2.0, 0.0, 0.0, 0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {
                'postfix': ' ',
                'scale': 1,
                'names': ['mean: < 0.0', 'std: < 0.0', 'mean: > 1.0', 'std: > 1.0'],
                'calculate_mean': False
            }
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_on_interval_values_in_range(self):
        annotations = [RegressionAnnotation('identifier', 0.5), RegressionAnnotation('identifier', 0.5)]
        predictions = [RegressionPrediction('identifier', 1), RegressionPrediction('identifier', 0.25)]
        config = [{'type': 'mae_on_interval', 'end': 1}]
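        # absolute errors inside [0, 1) are 0.5 and 0.25: mean 0.375, std 0.125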
        expected = EvaluationResult(
            pytest.approx([0.375, 0.125]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {'postfix': ' ', 'scale': 1, 'names': ['mean: <= 0.0 < 1.0', 'std: <= 0.0 < 1.0'], 'calculate_mean': False}
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_on_interval_default_not_ignore_out_of_range(self):
        annotations = [
            RegressionAnnotation('identifier', -1),
            RegressionAnnotation('identifier', 2),
            RegressionAnnotation('identifier', 0.5)
        ]
        predictions = [
            RegressionPrediction('identifier', 1),
            RegressionPrediction('identifier', 2),
            RegressionPrediction('identifier', 1)
        ]
        config = [{'type': 'mae_on_interval', 'end': 1, 'ignore_values_not_in_interval': False}]
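        # errors are split into three buckets: 2.0 below the interval, 0.5 inside [0, 1), 0.0 above it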
        expected = EvaluationResult(
            pytest.approx([2.0, 0.0, 0.5, 0.0, 0.0, 0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {
                'postfix': ' ',
                'scale': 1,
                'names': [
                    'mean: < 0.0',
                    'std: < 0.0',
                    'mean: <= 0.0 < 1.0',
                    'std: <= 0.0 < 1.0',
                    'mean: > 1.0',
                    'std: > 1.0'
                ],
                'calculate_mean': False
            }
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_on_interval_with_given_interval(self):
        annotations = [
            RegressionAnnotation('identifier', -1),
            RegressionAnnotation('identifier', 2),
            RegressionAnnotation('identifier', 1)
        ]
        predictions = [
            RegressionPrediction('identifier', 1),
            RegressionPrediction('identifier', 3),
            RegressionPrediction('identifier', 1)
        ]
        config = [{'type': 'mae_on_interval', 'intervals': [0.0, 2.0, 4.0]}]
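        # explicit boundaries [0, 2, 4] give intervals [0, 2) and [2, 4); the out-of-range
        # annotation -1 is ignored by default, leaving errors 0.0 and 1.0 respectively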
        expected = EvaluationResult(
            pytest.approx([0.0, 0.0, 1.0, 0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {
                'postfix': ' ',
                'scale': 1,
                'names': ['mean: <= 0.0 < 2.0', 'std: <= 0.0 < 2.0', 'mean: <= 2.0 < 4.0', 'std: <= 2.0 < 4.0'],
                'calculate_mean': False
            }
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_on_interval_with_repeated_values(self):
        annotations = [
            RegressionAnnotation('identifier', -1),
            RegressionAnnotation('identifier', 2),
            RegressionAnnotation('identifier', 1)
        ]
        predictions = [
            RegressionPrediction('identifier', 1),
            RegressionPrediction('identifier', 3),
            RegressionPrediction('identifier', 1)
        ]
        config = [{'type': 'mae_on_interval', 'intervals': [0.0, 2.0, 2.0, 4.0]}]
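        # the duplicated boundary 2.0 is expected to be collapsed, producing the same
        # [0, 2) and [2, 4) intervals and results as the previous test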
        expected = EvaluationResult(
            pytest.approx([0.0, 0.0, 1.0, 0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {
                'postfix': ' ',
                'scale': 1,
                'names': ['mean: <= 0.0 < 2.0', 'std: <= 0.0 < 2.0', 'mean: <= 2.0 < 4.0', 'std: <= 2.0 < 4.0'],
                'calculate_mean': False
            }
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected

    def test_mae_on_interval_with_unsorted_values(self):
        annotations = [
            RegressionAnnotation('identifier', -1),
            RegressionAnnotation('identifier', 2),
            RegressionAnnotation('identifier', 1)
        ]
        predictions = [
            RegressionPrediction('identifier', 1),
            RegressionPrediction('identifier', 3),
            RegressionPrediction('identifier', 1)
        ]
        config = [{'type': 'mae_on_interval', 'intervals': [2.0, 0.0, 4.0]}]
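        # boundaries are expected to be sorted before building intervals, so [2, 0, 4]
        # yields the same [0, 2) and [2, 4) intervals and results as above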
        expected = EvaluationResult(
            pytest.approx([0.0, 0.0, 1.0, 0.0]),
            None,
            'mae_on_interval',
            'mae_on_interval',
            None,
            {
                'postfix': ' ',
                'scale': 1,
                'names': ['mean: <= 0.0 < 2.0', 'std: <= 0.0 < 2.0', 'mean: <= 2.0 < 4.0', 'std: <= 2.0 < 4.0'],
                'calculate_mean': False
            }
        )
        dispatcher = MetricsExecutor(config, None)

        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result == expected