1 """
2 Copyright (c) 2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16
17 import numpy as np
18
19 from ..representation import ClassificationAnnotation, ClassificationPrediction
20 from ..config import NumberField, StringField
21 from .metric import BaseMetricConfig, PerImageEvaluationMetric
22 from .average_meter import AverageMeter
23
24
class ClassificationAccuracy(PerImageEvaluationMetric):
    """
    Class for evaluating the accuracy metric of classification models.
    """

    __provider__ = 'accuracy'

    annotation_types = (ClassificationAnnotation, )
    prediction_types = (ClassificationPrediction, )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        def loss(annotation_label, prediction_top_k_labels):
            # 1 if the ground-truth label appears among the top-k predicted labels, 0 otherwise.
            return int(annotation_label in prediction_top_k_labels)

        self.accuracy = AverageMeter(loss)

    def validate_config(self):
        class _AccuracyValidator(BaseMetricConfig):
            top_k = NumberField(floats=False, min_value=1, optional=True)

        accuracy_validator = _AccuracyValidator(
            'accuracy',
            on_extra_argument=_AccuracyValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        accuracy_validator.validate(self.config)

    def configure(self):
        # top_k defaults to 1, i.e. plain top-1 accuracy.
        self.top_k = self.config.get('top_k', 1)

    def update(self, annotation, prediction):
        self.accuracy.update(annotation.label, prediction.top_k(self.top_k))

    def evaluate(self, annotations, predictions):
        return self.accuracy.evaluate()

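
# A quick sketch of how this metric behaves, assuming AverageMeter(loss)
# simply averages the per-sample loss values passed to update(). With a
# hypothetical config entry such as
#
#   metrics:
#     - type: accuracy
#       top_k: 5
#
# each sample contributes 1 when the annotated label appears among the five
# highest-scoring predicted labels, so evaluate() returns the usual top-5
# accuracy:
#
#   label=3, prediction.top_k(5) -> [3, 7, 1, 0, 9]  => loss = 1
#   label=5, prediction.top_k(5) -> [2, 9, 4, 1, 8]  => loss = 0
#   accuracy = (1 + 0) / 2 = 0.5

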
class ClassificationAccuracyClasses(PerImageEvaluationMetric):
    """
    Class for evaluating per-class accuracy of classification models.
    """

    __provider__ = 'accuracy_per_class'

    annotation_types = (ClassificationAnnotation, )
    prediction_types = (ClassificationPrediction, )

    def validate_config(self):
        class _AccuracyValidator(BaseMetricConfig):
            top_k = NumberField(floats=False, min_value=1, optional=True)
            label_map = StringField(optional=True)

        accuracy_validator = _AccuracyValidator(
            'accuracy_per_class',
            on_extra_argument=_AccuracyValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        accuracy_validator.validate(self.config)

    def configure(self):
        self.top_k = self.config.get('top_k', 1)
        # The config may point at a custom label map; by default the
        # 'label_map' entry of the dataset metadata is used.
        label_map = self.config.get('label_map', 'label_map')
        self.labels = self.dataset.metadata.get(label_map)
        self.meta['names'] = list(self.labels.values())

        def loss(annotation_label, prediction_top_k_labels):
            # One-entry-per-class vector: mark a hit for the annotated class
            # when it appears among the top-k predictions (labels are assumed
            # to be integers usable as indices into this vector).
            result = np.zeros_like(list(self.labels.keys()))
            if annotation_label in prediction_top_k_labels:
                result[annotation_label] = 1

            return result

        def counter(annotation_label):
            # Per-class sample counts, used to normalize the per-class hits.
            result = np.zeros_like(list(self.labels.keys()))
            result[annotation_label] = 1
            return result

        self.accuracy = AverageMeter(loss, counter)

    def update(self, annotation, prediction):
        self.accuracy.update(annotation.label, prediction.top_k(self.top_k))

    def evaluate(self, annotations, predictions):
        return self.accuracy.evaluate()
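

if __name__ == '__main__':
    # A minimal, self-contained sketch of the per-class bookkeeping above,
    # independent of AverageMeter (whose exact accumulation is assumed here:
    # loss and counter vectors are summed per sample and divided elementwise
    # on evaluate()). The labels and top-k outputs are made up for illustration.
    gt_labels = [0, 0, 1]                     # ground-truth labels, 2 classes
    top_k_outputs = [[0, 1], [2, 3], [1, 0]]  # hypothetical top-2 predictions

    hits = np.zeros(2, dtype=int)
    counts = np.zeros(2, dtype=int)
    for label, top_k in zip(gt_labels, top_k_outputs):
        counts[label] += 1
        if label in top_k:
            hits[label] += 1

    print('overall accuracy:', hits.sum() / counts.sum())  # 2/3
    print('per-class accuracy:', hits / counts)            # [0.5, 1.0]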