2 Copyright (c) 2019 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
import numpy as np

from .metric import PerImageEvaluationMetric, BaseMetricConfig
from ..representation import MultiLabelRecognitionAnnotation, MultiLabelRecognitionPrediction
from ..config import StringField, BoolField
class MultiLabelMetric(PerImageEvaluationMetric):
    """Base class for multi-label recognition metrics.

    Accumulates per-class true/false positive/negative counters over
    repeated update() calls; subclasses combine the counters in evaluate().
    Annotation labels equal to -1 are treated as "ignore" and are excluded
    from the per-class sample counter.
    """

    annotation_types = (MultiLabelRecognitionAnnotation, )
    prediction_types = (MultiLabelRecognitionPrediction, )

    def validate_config(self):
        """Check that the metric config contains only the supported fields."""
        class _MultiLabelConfigValidator(BaseMetricConfig):
            label_map = StringField(optional=True)
            calculate_average = BoolField(optional=True)

        config_validator = _MultiLabelConfigValidator(
            'accuracy', on_extra_argument=_MultiLabelConfigValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        config_validator.validate(self.config)

    def configure(self):
        """Read config values, fill result metadata and reset all counters."""
        label_map = self.config.get('label_map', 'label_map')
        self.labels = self.dataset.metadata.get(label_map)
        self.calculate_average = self.config.get('calculate_average', True)

        self.meta['scale'] = 1
        self.meta['postfix'] = ''
        self.meta['calculate_mean'] = False
        self.meta['names'] = list(self.labels.values())
        if self.calculate_average:
            self.meta['names'].append('average')

        # One accumulator slot per class. Plain `float` replaces the removed
        # `np.float` alias (deprecated in NumPy 1.20, removed in 1.24).
        num_classes = len(self.labels)
        self.tp = np.zeros(num_classes, dtype=float)
        self.fp = np.zeros(num_classes, dtype=float)
        self.tn = np.zeros(num_classes, dtype=float)
        self.fn = np.zeros(num_classes, dtype=float)

        self.counter = np.zeros(num_classes, dtype=float)

    def update(self, annotation, prediction):
        """Update the confusion counters with one annotation/prediction pair."""
        def loss(annotation_labels, prediction_labels):
            # Per-class 0/1 indicators of TP/FP/TN/FN for this single sample.
            # Labels equal to -1 match none of the branches and stay zero.
            tp_results = np.zeros(len(self.labels), dtype=float)
            fp_results = np.zeros(len(self.labels), dtype=float)
            tn_results = np.zeros(len(self.labels), dtype=float)
            fn_results = np.zeros(len(self.labels), dtype=float)

            for index, label in enumerate(annotation_labels):
                if label == 1 and label == prediction_labels[index]:
                    tp_results[index] = 1.
                if label == 1 and label != prediction_labels[index]:
                    fn_results[index] = 1.
                if label == 0 and label == prediction_labels[index]:
                    tn_results[index] = 1.
                if label == 0 and label != prediction_labels[index]:
                    fp_results[index] = 1.

            return tp_results, fp_results, tn_results, fn_results

        def counter(annotation_label):
            # Count only non-ignored (!= -1) ground-truth entries per class.
            count = np.zeros_like(annotation_label, dtype=float)
            cond = np.where(np.array(annotation_label) != -1)
            count[cond] = 1.
            return count

        tp_upd, fp_upd, tn_upd, fn_upd = loss(annotation.multi_label, prediction.multi_label)
        self.tp = np.add(self.tp, tp_upd)
        self.fp = np.add(self.fp, fp_upd)
        self.tn = np.add(self.tn, tn_upd)
        self.fn = np.add(self.fn, fn_upd)

        self.counter = np.add(self.counter, counter(annotation.multi_label))

    def evaluate(self, annotations, predictions):
        # Subclasses must combine the accumulated counters into a result.
        raise NotImplementedError
class MultiLabelAccuracy(MultiLabelMetric):
    """Per-class accuracy: (TP + TN) / number of labeled samples per class."""

    __provider__ = 'multi_accuracy'

    def evaluate(self, annotations, predictions):
        """Return per-class accuracies, plus the overall average if enabled."""
        tp_tn = np.add(self.tp, self.tn, dtype=float)
        # where= guards classes that never appeared (counter == 0) -> 0.0.
        per_class = np.divide(tp_tn, self.counter, out=np.zeros_like(tp_tn, dtype=float), where=self.counter != 0)
        if not self.calculate_average:
            # meta['names'] carries no 'average' entry in this mode (see the
            # base configure()), so only per-class values are returned —
            # consistent with the precision/recall siblings.
            return per_class
        average = np.sum(tp_tn) / np.sum(self.counter)

        return [*per_class, average]
class MultiLabelPrecision(MultiLabelMetric):
    """Per-class precision: TP / (TP + FP)."""

    __provider__ = 'multi_precision'

    def evaluate(self, annotations, predictions):
        """Return per-class precisions, plus the micro-average if enabled."""
        tp_fp = np.add(self.tp, self.fp, dtype=float)
        # where= guards classes with no positive predictions (TP+FP == 0) -> 0.0.
        per_class = np.divide(self.tp, tp_fp, out=np.zeros_like(self.tp, dtype=float), where=tp_fp != 0)
        if not self.calculate_average:
            return per_class
        average = np.sum(self.tp) / np.sum(tp_fp)

        return [*per_class, average]
class MultiLabelRecall(MultiLabelMetric):
    """Per-class recall: TP / (TP + FN)."""

    __provider__ = 'multi_recall'

    def evaluate(self, annotations, predictions):
        """Return per-class recalls, plus the micro-average if enabled."""
        tp_fn = np.add(self.tp, self.fn, dtype=float)
        # where= guards classes with no positive ground truth (TP+FN == 0) -> 0.0.
        per_class = np.divide(self.tp, tp_fn, out=np.zeros_like(self.tp, dtype=float), where=tp_fn != 0)
        if not self.calculate_average:
            return per_class
        average = np.sum(self.tp) / np.sum(tp_fn)

        return [*per_class, average]
class F1Score(PerImageEvaluationMetric):
    """Per-class F1 score built from the multi-label precision and recall."""

    __provider__ = 'f1-score'
    annotation_types = (MultiLabelRecognitionAnnotation, )
    prediction_types = (MultiLabelRecognitionPrediction, )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Delegate counting to the precision/recall metrics over the same data.
        self.precision = MultiLabelPrecision(self.config, self.dataset)
        self.recall = MultiLabelRecall(self.config, self.dataset)

    def validate_config(self):
        """Check that the metric config contains only the supported fields."""
        class _F1ScoreValidator(BaseMetricConfig):
            label_map = StringField(optional=True)
            calculate_average = BoolField(optional=True)

        f1_score_config_validator = _F1ScoreValidator(
            'f1_score', on_extra_argument=_F1ScoreValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        f1_score_config_validator.validate(self.config)

    def configure(self):
        """Read config values and fill result metadata."""
        label_map = self.config.get('label_map', 'label_map')
        self.labels = self.dataset.metadata.get(label_map)
        self.calculate_average = self.config.get('calculate_average', True)

        self.meta['scale'] = 1
        self.meta['postfix'] = ''
        self.meta['calculate_mean'] = False
        # Single assignment: 'average' is listed only when it is reported,
        # keeping names aligned with the values returned by evaluate().
        self.meta['names'] = list(self.labels.values())
        if self.calculate_average:
            self.meta['names'].append('average')

    def update(self, annotation, prediction):
        """Forward the sample to both underlying metrics."""
        self.precision.update(annotation, prediction)
        self.recall.update(annotation, prediction)

    def evaluate(self, annotations, predictions):
        """Return per-class F1 scores, plus the average F1 if enabled."""
        precisions = self.precision.evaluate(annotations, predictions)
        recalls = self.recall.evaluate(annotations, predictions)

        # The sub-metrics append their average only when calculate_average is
        # set; slice it off only in that case so no real class is dropped.
        if self.calculate_average:
            per_class_precision, per_class_recall = precisions[:-1], recalls[:-1]
        else:
            per_class_precision, per_class_recall = precisions, recalls

        f1_denominator = np.add(per_class_precision, per_class_recall, dtype=float)
        f1_numerator = np.multiply(per_class_precision, per_class_recall, dtype=float)

        per_class = 2 * np.divide(
            f1_numerator, f1_denominator, out=np.zeros_like(f1_numerator, dtype=float),
            where=f1_denominator != 0
        )
        if not self.calculate_average:
            return per_class

        average_sum = precisions[-1] + recalls[-1]
        # Guard the degenerate case where both averages are zero.
        average = 2 * (precisions[-1] * recalls[-1]) / average_sum if average_sum != 0 else 0.0

        return [*per_class, average]