Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
import warnings

import numpy as np
import pytest

from accuracy_checker.metrics import DetectionMAP
from accuracy_checker.metrics.detection import Recall, bbox_match
from accuracy_checker.metrics.overlap import IOU, IOA

from tests.common import (
    make_representation, single_class_dataset, multi_class_dataset,
    multi_class_dataset_without_background
)
26 def _test_metric_wrapper(metric_cls, dataset, **kwargs):
27 provider = metric_cls.__provider__
28 config = {'type': provider, 'name': provider}
29 config.update(**kwargs)
30 return metric_cls(config, dataset, provider)
    def test_single(self):
        # Single gt box matched against a single prediction with score 1.
        # NOTE(review): the `gt`/`pred` source strings and the final tp/fp
        # assertions are missing from this excerpt — confirm against the
        # full file.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
    def test_single_with_ignored_tp(self):
        # The only prediction is marked 'difficult' before matching.
        # NOTE(review): the `gt`/`pred` source strings and the final
        # assertions are missing from this excerpt.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        pred[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
    def test_single_with_use_filtered_tp(self):
        # Same setup as the ignored-tp case, but passes use_filtered_tp=True.
        # NOTE(review): the `gt`/`pred` source strings and the final
        # assertions are missing from this excerpt.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        pred[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator, use_filtered_tp=True)
    def test_single_non_overlap(self):
        # gt box (5,5)-(10,10) and prediction (0,0)-(5,5) share only a
        # corner point, so the IoU overlap is zero.
        gt = make_representation("0 5 5 10 10", is_ground_truth=True)
        pred = make_representation("0 0 0 5 5", score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_single_non_overlap_ignored(self):
        # Non-overlapping boxes, with the only prediction marked 'difficult'.
        gt = make_representation("0 5 5 10 10", is_ground_truth=True)
        pred = make_representation("0 0 0 5 5", score=1)
        pred[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_multiple(self):
        # Two gt boxes of label 0 and two identical predictions.
        gt = make_representation("0 0 0 5 5; 0 7 7 8 8", is_ground_truth=True)
        pred = make_representation("0 0 0 5 5; 0 7 7 8 8", score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_multiple_2(self):
        # Two gt boxes; predictions carry explicit scores (1 and 0.8) and
        # the second prediction does not overlap either gt box.
        gt = make_representation("0 0 0 5 5; 0 9 9 10 10", is_ground_truth=True)
        pred = make_representation("1 0 0 0 5 5; 0.8 0 7 7 8 8")
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_multi_label(self):
        # gt holds one box of label 1 and one of label 0; bbox_match is
        # invoked once per label.
        gt = make_representation("1 0 0 5 5; 0 9 9 10 10", is_ground_truth=True)
        pred = make_representation("1 1 0 0 5 5; 0.8 0 7 7 8 8")
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 1, overlap_evaluator)
        assert tp.shape[0] == 1
        # NOTE(review): assertions between the two calls and after the
        # second one are missing from this excerpt.

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        assert tp.shape[0] == 1
    def test_multi_image(self):
        # Two images, each with one identical gt box and prediction.
        gt = make_representation(["0 0 0 5 5", "0 0 0 5 5"], is_ground_truth=True)
        pred = make_representation(["0 0 0 5 5", "0 0 0 5 5"], score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_false_negative(self):
        # Two gt boxes but only one prediction, so one gt stays unmatched.
        gt = make_representation("0 0 0 5 5; 0 1 1 6 6", is_ground_truth=True)
        pred = make_representation("0 0 0 5 5", score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, ngt = bbox_match(gt, pred, 0, overlap_evaluator)
        assert tp.shape[0] == 1
        # NOTE(review): further assertions (e.g. on `ngt`) are missing from
        # this excerpt.
    def test_multiple_detections(self):
        # Two detections (scores 1 and 0.9) both covering the same gt box.
        gt = make_representation("0 0 0 5 5", is_ground_truth=True)
        pred = make_representation("1 0 0 0 5 5; 0.9 0 0 0 5 5")
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the tp/fp assertions are missing from this excerpt.
    def test_no_annotations(self):
        # NOTE(review): the `gt`/`pred` source strings and assertions are
        # missing from this excerpt; presumably `gt` describes an image with
        # no annotated boxes — confirm against the full file.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, _ = bbox_match(gt, pred, 0, overlap_evaluator)
    def test_no_predictions(self):
        # NOTE(review): the `gt`/`pred` source strings and assertions are
        # missing from this excerpt; presumably `pred` is empty — confirm
        # against the full file.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator)
    def test_iou_empty_prediction_box(self):
        # Guards against numpy runtime warnings when a prediction box has
        # zero area under the IoU evaluator.
        # NOTE(review): the `gt`/`pred` source strings are missing from this
        # excerpt. Also, pytest.warns(None) is deprecated since pytest 6.2
        # and removed in pytest 7 — consider warnings.catch_warnings(record=True).
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOU({})

        with pytest.warns(None) as warnings:
            tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator)
        assert len(warnings) == 0
    def test_ioa_empty_prediction_box(self):
        # Same zero-area guard as above, but for the IOA evaluator.
        # NOTE(review): the `gt`/`pred` source strings are missing from this
        # excerpt. Also, pytest.warns(None) is deprecated since pytest 6.2
        # and removed in pytest 7 — consider warnings.catch_warnings(record=True).
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOA({})

        with pytest.warns(None) as warnings:
            tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator)
        assert len(warnings) == 0
    def test_iou_zero_union(self):
        # NOTE(review): despite the name, this instantiates the IOA
        # evaluator — confirm whether IOU({}) was intended.
        # The `gt`/`pred` source strings are also missing from this excerpt,
        # and pytest.warns(None) is removed in pytest 7.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        overlap_evaluator = IOA({})

        with pytest.warns(None) as warnings:
            tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator)
        assert len(warnings) == 0
    def test_single_difficult(self):
        # The only gt box is flagged difficult and matching runs with
        # ignore_difficult=True.
        # NOTE(review): the `gt`/`pred` source strings and assertions are
        # missing from this excerpt.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        gt[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator, ignore_difficult=True)
    def test_single_with_not_ignore_difficult(self):
        # Same difficult-gt setup, but with ignore_difficult=False.
        # NOTE(review): the `gt`/`pred` source strings and assertions are
        # missing from this excerpt.
        gt = make_representation(gt, is_ground_truth=True)
        pred = make_representation(pred, score=1)
        gt[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator, ignore_difficult=False)
    def test_single_difficult_non_overlap(self):
        # Difficult gt box that does not overlap the prediction.
        gt = make_representation("0 5 5 10 10", is_ground_truth=True)
        gt[0].metadata['difficult_boxes'] = [0]
        pred = make_representation("0 0 0 5 5", score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator)
        # NOTE(review): the assertions are missing from this excerpt.
    def test_single_difficult_non_overlap_not_ignore_difficult(self):
        # Non-overlapping difficult gt box, with ignore_difficult=False.
        gt = make_representation("0 5 5 10 10", is_ground_truth=True)
        gt[0].metadata['difficult_boxes'] = [0]
        pred = make_representation("0 0 0 5 5", score=1)
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator, ignore_difficult=False)
        # NOTE(review): the assertions are missing from this excerpt.
    def test_multiple_detections_with_ignore_difficult(self):
        # Two detections of one difficult gt box, ignore_difficult=True.
        gt = make_representation("0 0 0 5 5", is_ground_truth=True)
        pred = make_representation("1 0 0 0 5 5; 0.9 0 0 0 5 5")
        gt[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator, ignore_difficult=True)
        # NOTE(review): the assertions are missing from this excerpt.
    def test_multiple_detections_with_not_ignore_difficult(self):
        # Two detections of one difficult gt box, ignore_difficult=False.
        gt = make_representation("0 0 0 5 5", is_ground_truth=True)
        pred = make_representation("1 0 0 0 5 5; 0.9 0 0 0 5 5")
        gt[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        tp, fp, _, n = bbox_match(gt, pred, 0, overlap_evaluator, ignore_difficult=False)
        # NOTE(review): the assertions are missing from this excerpt.
    def test_multiple_detections_with_ignore_difficult_and_not_allow_multiple_matches_per_ignored(self):
        # Difficult gt box with two detections; multiple matches against an
        # ignored box are disallowed.
        gt = make_representation("0 0 0 5 5", is_ground_truth=True)
        pred = make_representation("1 0 0 0 5 5; 0.9 0 0 0 5 5")
        gt[0].metadata['difficult_boxes'] = [0]
        overlap_evaluator = IOU({})

        # NOTE(review): the closing parenthesis of this call and the
        # assertions are missing from this excerpt.
        tp, fp, _, n = bbox_match(
            gt, pred, 0, overlap_evaluator,
            ignore_difficult=True, allow_multiple_matches_per_ignored=False
323 def test_one_object(self):
324 gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
325 pred = make_representation(["0 0 0 5 5"], score=1)
326 metric = _test_metric_wrapper(Recall, single_class_dataset())
327 assert 1 == metric(gt, pred)[0]
328 assert metric.meta.get('names') == ['dog']
330 def test_two_objects(self):
331 gt = make_representation(["0 0 0 5 5; 0 10 10 20 20"], is_ground_truth=True)
332 pred = make_representation(["0 0 0 5 5; 0 10 10 20 20"], score=1)
333 assert 1 == _test_metric_wrapper(Recall, single_class_dataset())(gt, pred)[0]
335 def test_false_positive(self):
336 gt2 = make_representation(["0 10 10 20 20"], is_ground_truth=True)
337 pred2 = make_representation(["0 0 0 5 5"], score=1)
338 metric = _test_metric_wrapper(Recall, single_class_dataset())
339 assert 0 == metric(gt2, pred2)[0]
340 assert metric.meta.get('names') == ['dog']
342 gt1 = make_representation(["0 0 0 5 5"], is_ground_truth=True)
343 pred1 = make_representation(["0 0 0 5 5; 0 10 10 20 20"], score=1)
344 assert 1 == metric(gt1, pred1)[0]
345 assert metric.meta.get('names') == ['dog']
347 def test_false_negative(self):
348 gt = make_representation(["0 10 10 20 20; 0 0 0 5 5"], is_ground_truth=True)
349 pred = make_representation(["0 0 0 5 5"], score=1)
350 metric = _test_metric_wrapper(Recall, single_class_dataset())
351 assert 0.5 == metric(gt, pred)[0]
352 assert metric.meta.get('names') == ['dog']
354 def test_duplicate_detections(self):
355 gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
356 pred = make_representation(["0 0 0 5 5; 0 0 0 5 5"], score=1)
358 metric = _test_metric_wrapper(Recall, single_class_dataset())
359 assert 1 == metric(gt, pred)[0]
360 assert metric.meta.get('names') == ['dog']
362 def test_no_warnings_in_recall_calculation(self):
363 gt = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], is_ground_truth=True)
364 pred = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], score=1)
366 with pytest.warns(None) as warnings:
367 _test_metric_wrapper(Recall, multi_class_dataset())(gt, pred)
368 assert len(warnings) == 0
370 def test_on_dataset_without_background(self):
371 gt = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], is_ground_truth=True)
372 pred = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], score=1)
374 with pytest.warns(None) as warnings:
375 _test_metric_wrapper(Recall, multi_class_dataset_without_background())(gt, pred)
376 assert len(warnings) == 0
378 def test_not_gt_boxes_for_matching(self):
379 gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
380 pred = make_representation(["1 0 0 5 5"], score=1)
382 metric = _test_metric_wrapper(Recall, multi_class_dataset_without_background())
383 assert 0 == metric(gt, pred)[0]
384 assert metric.meta.get('names') == ['cat']
    def test_selects_all_detections(self):
        gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
        pred = make_representation(["0 0 0 5 5; 0 0 0 5 5"], score=1)

        metric = _test_metric_wrapper(DetectionMAP, single_class_dataset())
        # NOTE(review): the metric evaluation call appears to be missing
        # from this excerpt.

        # Default-configuration checks on the constructed metric.
        assert not metric.distinct_conf
        assert metric.overlap_threshold == 0.5
        assert metric.ignore_difficult
        assert metric.meta.get('names') == ['dog']
400 def test_no_warnings_in_map_calculation(self):
401 gt = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], is_ground_truth=True)
402 pred = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], score=1)
404 with pytest.warns(None) as warnings:
405 _test_metric_wrapper(DetectionMAP, multi_class_dataset())(gt, pred)
406 assert len(warnings) == 0
408 def test_perfect_detection(self):
409 gt = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], is_ground_truth=True)
410 pred = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], score=1)
412 metric = _test_metric_wrapper(DetectionMAP, multi_class_dataset())
413 assert metric(gt, pred) == [1.0, 1.0]
414 assert metric.meta.get('names') == ['dog', 'cat']
    def test_one_false_alarm(self):
        # Class 1 gets one false alarm on the first image: AP stays 1.0 for
        # class 0 and drops to 0.5 for class 1.
        gt = make_representation(["0 0 0 5 5", "1 0 0 5 5"], is_ground_truth=True)
        pred = make_representation(["1 10 10 20 20; 0 0 0 5 5", "1 0 0 5 5"], score=1)
        metric = _test_metric_wrapper(DetectionMAP, multi_class_dataset())
        values = metric(gt, pred)
        assert values == [1.0, 0.5]
        map_ = np.mean(values)
        # NOTE(review): the assertion on map_ (presumably 0.75) is missing
        # from this excerpt.

        assert metric.meta.get('names') == ['dog', 'cat']
426 def test_zero_detection(self):
427 gt = make_representation(["0 0 0 5 5; 1 10 10 20 20"], is_ground_truth=True)
428 pred = make_representation(["0 30 30 40 40"], score=1)
430 metric = _test_metric_wrapper(DetectionMAP, multi_class_dataset())
431 assert metric(gt, pred) == [0.0]
432 assert metric.meta.get('names') == ['dog']
    def test_no_detections_warn_user_warning(self):
        # An empty prediction set should trigger exactly one UserWarning.
        gt = make_representation(["0 0 0 5 5; 1 10 10 20 20"], is_ground_truth=True)
        pred = make_representation("", score=1)
        with pytest.warns(UserWarning) as warnings:
            map_ = _test_metric_wrapper(DetectionMAP, multi_class_dataset())(gt, pred)[0]
            assert len(warnings) == 1
            # NOTE(review): an assertion on map_ appears to be missing from
            # this excerpt.
    def test_detection_on_dataset_without_background(self):
        # mAP on a dataset without a background class should warn nothing.
        gt = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], is_ground_truth=True)
        pred = make_representation(["0 0 0 5 5; 1 10 10 20 20", "1 0 0 5 5"], score=1)

        # NOTE(review): pytest.warns(None) is deprecated since pytest 6.2
        # and removed in pytest 7 — consider warnings.catch_warnings(record=True).
        with pytest.warns(None) as warnings:
            map_ = _test_metric_wrapper(DetectionMAP, multi_class_dataset_without_background())(gt, pred)
            # NOTE(review): assertions on map_ appear to be missing from
            # this excerpt.
        assert len(warnings) == 0
453 def test_not_gt_boxes_for_box_matching(self):
454 gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
455 pred = make_representation(["1 0 0 5 5"], score=1)
457 metric = _test_metric_wrapper(Recall, multi_class_dataset_without_background())
458 assert 0 == metric(gt, pred)[0]
459 assert metric.meta.get('names') == ['cat']