"""
Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
17 from pathlib import Path
19 from ..representation import DetectionAnnotation
20 from ..utils import get_key_by_value, read_json, read_xml
22 from .format_converter import FileBasedAnnotationConverter
class BITVehicleJSON(FileBasedAnnotationConverter):
    """Convert BIT-Vehicle JSON annotations into DetectionAnnotation objects.

    Expected input: a JSON list of per-image entries, each carrying an 'image'
    path and an 'objects' list; every object has a 'bbox' of
    [x_min, y_min, x_max, y_max] and a 'label'.
    """
    __provider__ = 'bitvehicle_json'

    def convert(self):
        """Read ``self.annotation_file`` and return (annotations, dataset_meta).

        NOTE(review): the original source was recovered from a mangled extract;
        the list-append runs inside both branches below were reconstructed from
        the surrounding code — verify against upstream before relying on them.
        """
        annotations = []
        for annotation_image in read_json(self.annotation_file):
            labels, x_mins, y_mins, x_maxs, y_maxs, is_ignored, occluded = [], [], [], [], [], [], []
            for detection in annotation_image['objects']:
                x_min, y_min, x_max, y_max = detection['bbox']
                label = detection['label']

                if label == 'ignored':
                    # An 'ignored' region is replicated once per known class so
                    # the same box is excluded from evaluation for every class;
                    # its positions are recorded in is_ignored.
                    for class_ in _CLASS_TO_IND.values():
                        is_ignored.append(len(labels))
                        labels.append(class_)
                        x_mins.append(x_min)
                        y_mins.append(y_min)
                        x_maxs.append(x_max)
                        y_maxs.append(y_max)
                else:
                    # Both flag spellings are accepted for occlusion.
                    is_occluded = detection.get('is_occluded', False) or detection.get('occluded', False)
                    is_difficult = detection.get('difficult', False)
                    if is_occluded or is_difficult:
                        occluded.append(len(labels))

                    labels.append(_CLASS_TO_IND[label])
                    x_mins.append(x_min)
                    y_mins.append(y_min)
                    x_maxs.append(x_max)
                    y_maxs.append(y_max)

            # Identifier is the bare file name of the annotated image.
            identifier = Path(annotation_image['image']).name
            annotation = DetectionAnnotation(identifier, labels, x_mins, y_mins, x_maxs, y_maxs)
            annotation.metadata['is_occluded'] = occluded
            annotation.metadata['difficult_boxes'] = is_ignored

            annotations.append(annotation)

        return annotations, get_meta()
class BITVehicle(FileBasedAnnotationConverter):
    """Convert BIT-Vehicle CVAT-style XML annotations into DetectionAnnotation objects.

    Each ``<image name=...>`` element contributes one annotation; its ``<box>``
    children carry xtl/ytl/xbr/ybr coordinates, a label, and an occlusion flag.
    """
    __provider__ = 'bitvehicle'

    def convert(self):
        """Read ``self.annotation_file`` and return (annotations, dataset_meta).

        NOTE(review): recovered from a mangled extract; the coordinate-list
        append run below was reconstructed — verify against upstream.
        """
        annotations = []
        for annotation_image in read_xml(self.annotation_file):
            # Skip non-image elements (e.g. meta/version nodes in the XML).
            if annotation_image.tag != 'image':
                continue

            identifier = annotation_image.get('name')
            labels, x_mins, y_mins, x_maxs, y_maxs, occluded = [], [], [], [], [], []
            for roi in annotation_image.findall('box'):
                label = roi.get("label")
                x_left = int(roi.get('xtl'))
                x_right = int(roi.get('xbr'))
                y_top = int(roi.get('ytl'))
                y_bottom = int(roi.get('ybr'))
                # NOTE(review): despite the names, x_max/y_max are computed as
                # width/height (xbr - xtl, ybr - ytl). Presumably this dataset's
                # XML stores sizes in 'xbr'/'ybr' — otherwise this is a latent
                # bug; confirm against the actual annotation files.
                x_min, y_min, x_max, y_max = x_left, y_top, x_right - x_left, y_bottom - y_top
                is_occluded = bool(int(roi.get('occluded')))

                labels.append(_CLASS_TO_IND[label])
                x_mins.append(x_min)
                y_mins.append(y_min)
                x_maxs.append(x_max)
                y_maxs.append(y_max)
                if is_occluded:
                    occluded.append(len(labels) - 1)

            annotation = DetectionAnnotation(identifier, labels, x_mins, y_mins, x_maxs, y_maxs)
            annotation.metadata['is_occluded'] = occluded

            annotations.append(annotation)

        return annotations, get_meta()
103 '__background__', # always index 0
108 _CLASS_TO_IND = dict(zip(_CLASSES, list(range(len(_CLASSES)))))
112 labels = dict(enumerate(_CLASSES))
113 labels[-1] = 'ignored'
115 return {'label_map': labels, 'background_label': get_key_by_value(labels, '__background__')}