"""
Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest

from accuracy_checker.config import ConfigError
from accuracy_checker.metrics import ClassificationAccuracy, MetricsExecutor
from accuracy_checker.metrics.metric import Metric
from accuracy_checker.representation import (
    ClassificationAnnotation,
    ClassificationPrediction,
    ContainerAnnotation,
    ContainerPrediction,
    DetectionAnnotation,
    DetectionPrediction
)

from .common import DummyDataset


class TestMetric:
    def setup_method(self):
        self.module = 'accuracy_checker.metrics.metric_evaluator'

    def test_missed_metrics_raises_config_error_exception(self):
        config = {'annotation': 'custom'}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_missed_metrics_raises_config_error_exception_with_custom_name(self):
        config = {'name': 'some_name', 'annotation': 'custom'}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_empty_metrics_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': []}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_metrics_with_empty_entry_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_missed_metric_type_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{'undefined': ''}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_undefined_metric_type_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{'type': ''}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_arguments(self):
        config = {'annotation': 'custom', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        assert len(dispatcher.metrics) == 1
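        # The layout of dispatcher.metrics entries is assumed from the
        # unpacking below: five-element tuples whose second field is the
        # metric instance; the remaining fields are not inspected here.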
        _, accuracy_metric, _, _, _ = dispatcher.metrics[0]
        assert isinstance(accuracy_metric, ClassificationAccuracy)
        assert accuracy_metric.top_k == 1

    def test_accuracy_with_several_annotation_sources_raises_config_error_exception(self):
        config = {
            'annotation': 'custom',
            'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation1, annotation2'}]
        }

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_with_several_prediction_sources_raises_config_error_exception(self):
        config = {
            'annotation': 'custom',
            'metrics': [{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction1, prediction2'}]
        }

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_on_container_with_wrong_annotation_source_name_raises_config_error_exception(self):
        annotations = [ContainerAnnotation({'annotation': ClassificationAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a'}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_wrong_annotation_type_raises_config_error_exception(self):
        annotations = [DetectionAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {
            'annotation': 'mocked',
            'metrics': [{'type': 'accuracy', 'top_k': 1}]
        }

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_annotations_in_container_raises_config_error_exception(self):
        annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {
            'annotation': 'mocked',
            'metrics': [{'type': 'accuracy', 'top_k': 1}]
        }

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_annotation_type_as_annotation_source_for_container_raises_config_error(self):
        annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {
            'annotation': 'mocked',
            'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation'}]
        }

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_on_annotation_container_with_several_suitable_representations_raises_config_error_exception(self):
        annotations = [ContainerAnnotation({
            'annotation1': ClassificationAnnotation('identifier', 3),
            'annotation2': ClassificationAnnotation('identifier', 3)
        })]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_wrong_prediction_type_raises_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_prediction_in_container_raises_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_prediction_type_as_prediction_source_for_container_raises_config_error(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {
            'annotation': 'mocked',
            'metrics': [{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction'}]
        }

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_on_prediction_container_with_several_suitable_representations_raises_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ContainerPrediction({
            'prediction1': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
            'prediction2': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        })]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_complete_accuracy(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
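        # Class 3 has the highest score (4.0) and matches the annotated label,
        # so top-1 accuracy evaluates to 1.0 below.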
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None
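
    # The two container tests below rely on MetricsExecutor resolving sources
    # inside ContainerAnnotation/ContainerPrediction: implicitly when exactly
    # one suitable representation is present, or explicitly via the
    # annotation_source/prediction_source options.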

    def test_complete_accuracy_with_container_default_sources(self):
        annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
        predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_complete_accuracy_with_container_sources(self):
        annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
        predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {
            'annotation': 'mocked',
            'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a', 'prediction_source': 'p'}]
        }

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_zero_accuracy(self):
        annotations = [ClassificationAnnotation('identifier', 2)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_complete_accuracy_top_3(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 3.0, 4.0, 2.0])]
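        # Class 3 (score 2.0) has the third-highest score, so it lands inside
        # the top-3 set even though it would miss top-1.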
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_zero_accuracy_top_3(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
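        # Class 3 (score 1.0) has the lowest score, so it falls outside the
        # top-3 set and accuracy stays at 0.0.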
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_reference_is_10_by_config(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3, 'reference': 10}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value == 10
            assert evaluation_result.threshold is None

    def test_threshold_is_10_by_config(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3, 'threshold': 10}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold == 10

    def test_classification_per_class_accuracy_fully_zero_prediction(self):
        annotation = ClassificationAnnotation('identifier', 0)
        prediction = ClassificationPrediction('identifier', [1.0, 2.0])
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch([annotation], [prediction])
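
        # The single sample (label 0) is predicted as class 1, while class 1
        # itself has no annotated samples; accuracy_per_class is expected to
        # report 0.0 for both classes rather than NaN for the empty one.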
        for _, evaluation_result in dispatcher.iterate_metrics([annotation], [prediction]):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_partially_zero_prediction(self):
        annotation = [ClassificationAnnotation('identifier', 1)]
        prediction = [ClassificationPrediction('identifier', [1.0, 2.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})
        dispatcher = MetricsExecutor(config, dataset)

        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_complete_prediction(self):
        annotation = [ClassificationAnnotation('identifier_1', 1), ClassificationAnnotation('identifier_2', 0)]
        prediction = [
            ClassificationPrediction('identifier_1', [1.0, 2.0]),
            ClassificationPrediction('identifier_2', [2.0, 1.0])
        ]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})
        dispatcher = MetricsExecutor(config, dataset)

        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(1.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_partially_correct_prediction(self):
        annotation = [
            ClassificationAnnotation('identifier_1', 1),
            ClassificationAnnotation('identifier_2', 0),
            ClassificationAnnotation('identifier_3', 0)
        ]
        prediction = [
            ClassificationPrediction('identifier_1', [1.0, 2.0]),
            ClassificationPrediction('identifier_2', [2.0, 1.0]),
            ClassificationPrediction('identifier_3', [1.0, 5.0])
        ]
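        # Class 0 has two samples, of which only identifier_2 is classified
        # correctly (0.5); the single class-1 sample is correct (1.0).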
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})
        dispatcher = MetricsExecutor(config, dataset)

        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.5)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_prediction_top3_zero(self):
        annotation = [ClassificationAnnotation('identifier_1', 0), ClassificationAnnotation('identifier_2', 1)]
        prediction = [
            ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]),
            ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])
        ]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 3}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})
        dispatcher = MetricsExecutor(config, dataset)

        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 4
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_prediction_top3(self):
        annotation = [ClassificationAnnotation('identifier_1', 1), ClassificationAnnotation('identifier_2', 1)]
        prediction = [
            ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]),
            ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])
        ]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 3}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})
        dispatcher = MetricsExecutor(config, dataset)

        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 4
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.5)
            assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None
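

# Every metric provider is expected to validate its configuration and reject
# unknown keys. The class below first checks this generically over
# Metric.providers, then repeats the check explicitly for each metric type.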
class TestMetricExtraArgs:
    def test_all_metrics_raise_config_error_on_extra_args(self):
        for provider in Metric.providers:
            metric_config = {'type': provider, 'something_extra': 'extra'}
            with pytest.raises(ConfigError):
                Metric.provide(provider, metric_config, None)

    def test_detection_recall_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'recall', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('recall', metric_config, None)

    def test_detection_miss_rate_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'miss_rate', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('miss_rate', metric_config, None)

    def test_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('accuracy', metric_config, None)

    def test_per_class_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'accuracy_per_class', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('accuracy_per_class', metric_config, None)

    def test_character_recognition_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'character_recognition_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('character_recognition_accuracy', metric_config, None)

    def test_multi_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'multi_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('multi_accuracy', metric_config, None)

    def test_multi_precision_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'multi_precision', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('multi_precision', metric_config, None)

    def test_f1_score_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'f1-score', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('f1-score', metric_config, None)

    def test_mae_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mae', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mae', metric_config, None)

    def test_mse_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mse', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mse', metric_config, None)

    def test_rmse_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'rmse', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('rmse', metric_config, None)

    def test_mae_on_interval_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mae_on_interval', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mae_on_interval', metric_config, None)

    def test_mse_on_interval_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mse_on_interval', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mse_on_interval', metric_config, None)

    def test_rmse_on_interval_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'rmse_on_interval', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('rmse_on_interval', metric_config, None)

    def test_per_point_normed_error_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'per_point_normed_error', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('per_point_normed_error', metric_config, None)

    def test_average_point_error_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'normed_error', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('normed_error', metric_config, None)

    def test_reid_cmc_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'cmc', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('cmc', metric_config, None)

    def test_reid_map_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'reid_map', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('reid_map', metric_config, None)

    def test_pairwise_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'pairwise_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('pairwise_accuracy', metric_config, None)

    def test_segmentation_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'segmentation_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('segmentation_accuracy', metric_config, None)

    def test_mean_iou_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mean_iou', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mean_iou', metric_config, None)

    def test_mean_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'mean_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('mean_accuracy', metric_config, None)

    def test_frequency_weighted_accuracy_raises_config_error_on_extra_args(self):
        metric_config = {'type': 'frequency_weighted_accuracy', 'something_extra': 'extra'}
        with pytest.raises(ConfigError):
            Metric.provide('frequency_weighted_accuracy', metric_config, None)