"""
Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List

import numpy as np

from accuracy_checker.representation import DetectionAnnotation, DetectionPrediction, SegmentationPrediction, SegmentationAnnotation
from accuracy_checker.utils import get_path
# since it seems not possible to create pathlib.Path from str with '/' at the end we accept strings
# expect paths in posix format
@contextmanager
def mock_filesystem(hierarchy: List[str]):
    """Materialize *hierarchy* inside a temporary directory and yield its path.

    Args:
        hierarchy: posix-style relative paths; an entry ending with "/" is
            created as a directory, any other entry as an empty file (parent
            directories are created as needed).

    Yields:
        validated Path of the temporary root (removed again on exit).
    """
    with TemporaryDirectory() as prefix:
        for entry in hierarchy:
            path = Path(prefix) / entry
            if entry.endswith("/"):
                # trailing slash marks a directory entry
                path.mkdir(parents=True, exist_ok=True)
            else:
                # file entry: make sure the parent directory exists first
                parent = path.parent
                if parent != Path("."):
                    parent.mkdir(parents=True, exist_ok=True)
                # create an empty file
                path.open('w').close()

        yield get_path(prefix, is_directory=True)
def make_representation(bounding_boxes, is_ground_truth=False, score=None, meta=None):
    """Build detection representations from whitespace/semicolon box strings.

    Args:
        bounding_boxes: string or list of strings `score label x0 y0 x1 y1; label score x0 y0 x1 y1; ...`.
        is_ground_truth: True if bbs are annotation boxes.
        score: value in [0, 1], if not None, all prediction boxes are considered with the given score.
        meta: metadata for representation

    Returns:
        list with one DetectionAnnotation/DetectionPrediction per input string.
    """
    if not isinstance(bounding_boxes, list):
        bounding_boxes = [bounding_boxes]

    result = []
    for idx, box in enumerate(bounding_boxes):
        # np.mat parses the "a b c; d e f" matrix syntax into rows of numbers
        arr = np.array(np.mat(box))

        # an empty string produces an empty matrix; normalise its shape so the
        # column checks below still apply — assumes 5-column layout, TODO confirm
        if box == "":
            arr = np.array([]).reshape((0, 5))

        # annotations, and predictions with an externally supplied score, carry
        # 5 columns (label x0 y0 x1 y1); raw predictions carry 6 (score first)
        if is_ground_truth or score:
            assert arr.shape[1] == 5
        elif not is_ground_truth and not score:
            assert arr.shape[1] == 6

        if not is_ground_truth and score:
            # broadcast a scalar (or single-element) score to every box
            score_ = score
            if np.isscalar(score_) or len(score_) == 1:
                score_ = np.full(arr.shape[0], score_)
            arr = np.c_[score_, arr]

        if is_ground_truth:
            detection = DetectionAnnotation(str(idx), arr[:, 0], arr[:, 1], arr[:, 2], arr[:, 3], arr[:, 4])
        else:
            detection = DetectionPrediction(str(idx), arr[:, 1], arr[:, 0], arr[:, 2], arr[:, 3], arr[:, 4], arr[:, 5])

        if meta:
            detection.metadata = meta[idx]

        result.append(detection)

    return result
def make_segmentation_representation(mask, ground_truth=False):
    """Wrap *mask* into a one-element list of segmentation representations.

    Args:
        mask: segmentation mask to wrap.
        ground_truth: if True, build a SegmentationAnnotation (mask attached
            after construction); otherwise a SegmentationPrediction.

    Returns:
        single-element list with the built representation.
    """
    if ground_truth:
        representation = SegmentationAnnotation('identifier', None)
        representation.mask = mask
        return [representation]

    return [SegmentationPrediction('identifier', mask)]
def update_dict(dictionary, **kwargs):
    """Return a shallow copy of *dictionary* updated with **kwargs.

    The input dictionary is left unmodified.
    """
    copied = dictionary.copy()
    copied.update(**kwargs)

    return copied
106 def __init__(self, label_map, bg=-1):
107 self.label_map = label_map
112 return {"label_map": self.label_map, "background_label": self.background}
116 return self.metadata['label_map']
# @pytest.fixture(scope="function", params=[
#     {0: 'dog', -1: 'background'}, {0: 'dog', 1: 'cat', 2: 'human', -1: 'background'}, {0: 'dog', 1: 'cat', 2: 'human'}
# ], ids=['single class', 'multi class', 'multi_class_without_background'])
# def dataset(request):
#     labels = request.param
#     yield DummyDataset(label_map=labels, bg=-1)
def multi_class_dataset():
    """Dataset stub: three foreground classes plus an explicit background."""
    return DummyDataset(
        label_map={0: 'dog', 1: 'cat', 2: 'human', -1: 'background'},
        bg=-1,
    )
def multi_class_dataset_without_background():
    """Dataset stub: three foreground classes, no background label."""
    return DummyDataset(label_map={0: 'dog', 1: 'cat', 2: 'human'})
def single_class_dataset():
    """Dataset stub: one foreground class plus an explicit background."""
    return DummyDataset(label_map={0: 'dog', -1: 'background'}, bg=-1)