2 Copyright (c) 2019 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
import heapq
import math

import numpy as np

from ..representation import HitRatioAnnotation, HitRatioPrediction
from .metric import FullDatasetEvaluationMetric, BaseMetricConfig
from ..config import NumberField
class BaseRecommenderMetric(FullDatasetEvaluationMetric):
    """Base class for recommender-system metrics (hit ratio, NDCG).

    Predictions are accumulated per user in `update`; `evaluate` then ranks
    each user's candidate items by predicted score, keeps the top-k, and
    scores the user's ground-truth item against that rank list with the
    supplied `discounter` callable. The metric value is the mean over users.
    """

    annotation_types = (HitRatioAnnotation, )
    prediction_types = (HitRatioPrediction, )

    def __init__(self, discounter, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Default discounter: 1 if the ground-truth item appears in the
        # rank list, otherwise 0 (plain hit ratio).
        self.discounter = discounter or (lambda item, rank: int(item in rank))

    def validate_config(self):
        class _RecommenderValidator(BaseMetricConfig):
            top_k = NumberField(floats=False, min_value=1, optional=True)

        # NOTE(review): validator name argument restored from the codebase's
        # usual pattern -- confirm against the original file.
        recommender_validator = _RecommenderValidator(
            'recommend',
            on_extra_argument=_RecommenderValidator.ERROR_ON_EXTRA_ARGUMENT
        )
        recommender_validator.validate(self.config)

    def configure(self):
        self.top_k = self.config.get('top_k', 10)
        self.users_num = self.dataset.metadata.get('users_number')
        # Per-user list of (item, score) pairs collected during update().
        self.pred_per_user = {i: [] for i in range(self.users_num)}
        # Ground-truth (positive) item per user; written by update().
        self.gt_items = {}

    def update(self, annotation, prediction):
        self.pred_per_user[prediction.user].append((prediction.item, prediction.scores))
        if annotation.positive:
            self.gt_items[annotation.user] = annotation.item

    def evaluate(self, annotations, predictions):
        # Assumes every user received the same number of predictions as user 0.
        iter_num = len(self.pred_per_user[0])

        measure = []
        for user in range(self.users_num):
            map_item_score = {}
            for item, score in self.pred_per_user[user][:iter_num]:
                map_item_score[item] = score
            # Rank candidates by predicted score and keep the configured
            # top-k (was hard-coded to 10, ignoring self.top_k).
            ranklist = heapq.nlargest(self.top_k, map_item_score, key=map_item_score.get)
            measure.append(self.discounter(self.gt_items[user], ranklist))

        return np.mean(measure)
def hit_ratio_discounter(item, rank):
    """Return 1 when the ground-truth item is present in the rank list, else 0."""
    return 1 if item in rank else 0
def ndcg_discunter(item, rank):
    """NDCG discount for a single ground-truth item against a rank list.

    Returns log(2) / log(pos + 2) == 1 / log2(pos + 2), where pos is the
    0-based position of `item` in `rank`. Returns 0.0 when the item is not
    in the rank list (previously this raised ValueError from list.index).

    Name typo ("discunter") is kept for compatibility with existing callers.
    """
    if item not in rank:
        return 0.0
    return math.log(2) / math.log(rank.index(item) + 2)
class HitRatioMetric(BaseRecommenderMetric):
    """
    Class for evaluating Hit Ratio metric
    """

    __provider__ = 'hit_ratio'

    def __init__(self, *args, **kwargs):
        # Hit ratio: 1 per user whose ground-truth item made the top-k list.
        super().__init__(hit_ratio_discounter, *args, **kwargs)
class NDSGMetric(BaseRecommenderMetric):
    """
    Class for evaluating Normalized Discounted Cumulative Gain metric
    """

    # NOTE(review): provider name restored from the metric's semantics --
    # confirm against the original file / registry.
    __provider__ = 'ndcg'

    def __init__(self, *args, **kwargs):
        super().__init__(ndcg_discunter, *args, **kwargs)