tools/accuracy_checker/accuracy_checker/metrics/multilabel_recognition.py
1 """
2 Copyright (c) 2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16
17 import numpy as np
18 from .metric import PerImageEvaluationMetric, BaseMetricConfig
19 from ..representation import MultiLabelRecognitionAnnotation, MultiLabelRecognitionPrediction
20 from ..config import StringField, BoolField
21
22
class MultiLabelMetric(PerImageEvaluationMetric):
    """Base class for multi-label metrics: accumulates per-label tp/fp/tn/fn counters."""

    annotation_types = (MultiLabelRecognitionAnnotation,)
    prediction_types = (MultiLabelRecognitionPrediction,)

    def validate_config(self):
        class _MultiLabelConfigValidator(BaseMetricConfig):
            label_map = StringField(optional=True)
            calculate_average = BoolField(optional=True)

        config_validator = _MultiLabelConfigValidator(
            'accuracy', on_extra_argument=_MultiLabelConfigValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        config_validator.validate(self.config)

    def configure(self):
        label_map = self.config.get('label_map', 'label_map')
        self.labels = self.dataset.metadata.get(label_map)
        self.calculate_average = self.config.get('calculate_average', True)

        self.meta['scale'] = 1
        self.meta['postfix'] = ''
        self.meta['calculate_mean'] = False
        self.meta['names'] = list(self.labels.values())
        if self.calculate_average:
            self.meta['names'].append('average')

        # One accumulator slot per label in the label_map.
        self.tp = np.zeros(len(self.labels), dtype=float)
        self.fp = np.zeros(len(self.labels), dtype=float)
        self.tn = np.zeros(len(self.labels), dtype=float)
        self.fn = np.zeros(len(self.labels), dtype=float)

        self.counter = np.zeros(len(self.labels), dtype=float)
    def update(self, annotation, prediction):
        def loss(annotation_labels, prediction_labels):
            # Per-image one-hot confusion vectors; labels annotated as -1 are skipped.
            tp_results = np.zeros(len(self.labels), dtype=float)
            fp_results = np.zeros(len(self.labels), dtype=float)
            tn_results = np.zeros(len(self.labels), dtype=float)
            fn_results = np.zeros(len(self.labels), dtype=float)

            for index, label in enumerate(annotation_labels):
                if label == 1 and label == prediction_labels[index]:
                    tp_results[index] = 1.
                    continue

                if label == 1 and label != prediction_labels[index]:
                    fn_results[index] = 1.
                    continue

                if label == 0 and label == prediction_labels[index]:
                    tn_results[index] = 1.
                    continue

                if label == 0 and label != prediction_labels[index]:
                    fp_results[index] = 1.
                    continue

            return tp_results, fp_results, tn_results, fn_results

        def counter(annotation_label):
            # Count only the labels that are actually annotated (-1 marks "not set").
            count = np.zeros_like(annotation_label, dtype=float)
            cond = np.where(np.array(annotation_label) != -1)
            count[cond] = 1.
            return count

        tp_upd, fp_upd, tn_upd, fn_upd = loss(annotation.multi_label, prediction.multi_label)
        self.tp = np.add(self.tp, tp_upd)
        self.fp = np.add(self.fp, fp_upd)
        self.tn = np.add(self.tn, tn_upd)
        self.fn = np.add(self.fn, fn_upd)

        self.counter = np.add(self.counter, counter(annotation.multi_label))

    def evaluate(self, annotations, predictions):
        # Accumulation happens in update(); concrete metrics implement evaluate().
        pass


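# A minimal sketch of how MultiLabelMetric.update above accumulates its
# per-label counters, assuming labels are encoded as 1 (positive), 0 (negative)
# and -1 (not annotated); the vectors are illustrative only:
#
#   annotation.multi_label = [1, 0, -1, 1]
#   prediction.multi_label = [1, 1,  0, 0]
#
#   tp      -> [1, 0, 0, 0]
#   fp      -> [0, 1, 0, 0]
#   tn      -> [0, 0, 0, 0]
#   fn      -> [0, 0, 0, 1]
#   counter -> [1, 1, 0, 1]   # the -1 entry never contributes to the denominator
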
class MultiLabelAccuracy(MultiLabelMetric):
    __provider__ = 'multi_accuracy'

    def evaluate(self, annotations, predictions):
        tp_tn = np.add(self.tp, self.tn, dtype=float)
        per_class = np.divide(tp_tn, self.counter, out=np.zeros_like(tp_tn, dtype=float), where=self.counter != 0)
        if not self.calculate_average:
            return per_class
        average = np.sum(tp_tn) / np.sum(self.counter)

        return [*per_class, average]


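# Worked example for the 'multi_accuracy' evaluation above, assuming two labels
# and already-accumulated counters (the numbers are illustrative only):
#
#   >>> import numpy as np
#   >>> tp, tn, counter = np.array([3., 1.]), np.array([4., 2.]), np.array([10., 4.])
#   >>> tp_tn = tp + tn
#   >>> np.divide(tp_tn, counter, out=np.zeros_like(tp_tn), where=counter != 0)
#   array([0.7 , 0.75])
#   >>> np.sum(tp_tn) / np.sum(counter)   # appended as the 'average' entry
#   0.7142857142857143
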
class MultiLabelPrecision(MultiLabelMetric):
    __provider__ = 'multi_precision'

    def evaluate(self, annotations, predictions):
        tp_fp = np.add(self.tp, self.fp, dtype=float)
        per_class = np.divide(self.tp, tp_fp, out=np.zeros_like(self.tp, dtype=float), where=tp_fp != 0)
        if not self.calculate_average:
            return per_class
        average = np.sum(self.tp) / np.sum(tp_fp)

        return [*per_class, average]


class MultiLabelRecall(MultiLabelMetric):
    __provider__ = 'multi_recall'

    def evaluate(self, annotations, predictions):
        tp_fn = np.add(self.tp, self.fn, dtype=float)
        per_class = np.divide(self.tp, tp_fn, out=np.zeros_like(self.tp, dtype=float), where=tp_fn != 0)
        if not self.calculate_average:
            return per_class
        average = np.sum(self.tp) / np.sum(tp_fn)

        return [*per_class, average]


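# Worked example for 'multi_precision' and 'multi_recall' above, assuming two
# labels and already-accumulated counters (the numbers are illustrative only):
#
#   >>> import numpy as np
#   >>> tp, fp, fn = np.array([3., 0.]), np.array([1., 0.]), np.array([2., 5.])
#   >>> np.divide(tp, tp + fp, out=np.zeros_like(tp), where=(tp + fp) != 0)   # precision
#   array([0.75, 0.  ])
#   >>> np.divide(tp, tp + fn, out=np.zeros_like(tp), where=(tp + fn) != 0)   # recall
#   array([0.6, 0. ])
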
class F1Score(PerImageEvaluationMetric):
    __provider__ = 'f1-score'
    annotation_types = (MultiLabelRecognitionAnnotation,)
    prediction_types = (MultiLabelRecognitionPrediction,)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.precision = MultiLabelPrecision(self.config, self.dataset)
        self.recall = MultiLabelRecall(self.config, self.dataset)

    def validate_config(self):
        class _F1ScoreValidator(BaseMetricConfig):
            label_map = StringField(optional=True)
            calculate_average = BoolField(optional=True)

        f1_score_config_validator = _F1ScoreValidator(
            'f1_score', on_extra_argument=_F1ScoreValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        f1_score_config_validator.validate(self.config)

    def configure(self):
        label_map = self.config.get('label_map', 'label_map')
        self.labels = self.dataset.metadata.get(label_map)
        self.calculate_average = self.config.get('calculate_average', True)

        self.meta['scale'] = 1
        self.meta['postfix'] = ''
        self.meta['calculate_mean'] = False
        self.meta['names'] = list(self.labels.values())
        if self.calculate_average:
            self.meta['names'].append('average')

    def update(self, annotation, prediction):
        self.precision.update(annotation, prediction)
        self.recall.update(annotation, prediction)

    def evaluate(self, annotations, predictions):
        precisions = self.precision.evaluate(annotations, predictions)
        recalls = self.recall.evaluate(annotations, predictions)

        # The sub-metrics append their own 'average' entry only when averaging is enabled,
        # so strip it before computing the per-class scores.
        if self.calculate_average:
            per_class_precision, per_class_recall = precisions[:-1], recalls[:-1]
        else:
            per_class_precision, per_class_recall = precisions, recalls

        f1_numerator = np.multiply(per_class_precision, per_class_recall, dtype=float)
        f1_denominator = np.add(per_class_precision, per_class_recall, dtype=float)

        per_class = 2 * np.divide(
            f1_numerator, f1_denominator, out=np.zeros_like(f1_numerator, dtype=float),
            where=f1_denominator != 0
        )
        if not self.calculate_average:
            return per_class

        average = 2 * (precisions[-1] * recalls[-1]) / (precisions[-1] + recalls[-1])

        return [*per_class, average]
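
# A small sketch of the harmonic-mean step in F1Score.evaluate above, assuming
# per-class precision/recall arrays already produced by the two sub-metrics
# (values are illustrative only):
#
#   >>> import numpy as np
#   >>> precision, recall = np.array([0.75, 0.]), np.array([0.6, 0.])
#   >>> numerator, denominator = precision * recall, precision + recall
#   >>> 2 * np.divide(numerator, denominator, out=np.zeros_like(numerator), where=denominator != 0)
#   array([0.66666667, 0.        ])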