Copyright (c) 2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
17 from pathlib import Path
18 from argparse import ArgumentParser
19 from functools import partial
21 from .config import ConfigReader
22 from .logging import print_info, add_file_handler
23 from .model_evaluator import ModelEvaluator
24 from .progress_reporters import ProgressReporter
25 from .utils import get_path
# Builds the command-line interface for the accuracy-validation tool:
# one ArgumentParser with options for config/definition files, path prefixes
# (models, data, annotations, extensions, bitstreams, affinity maps),
# model-optimizer conversion settings, target framework/devices/tags,
# progress reporting and logging.
#
# NOTE(review): this chunk is a garbled extraction. Each line carries a fused
# original-source line number, and interior lines were dropped — every
# `parser.add_argument(` opener, several option-name lines, the per-option
# closing parentheses, and the trailing `return parser` are missing from this
# view. Code is kept byte-identical below; restore from the original source
# before attempting to run it. Comments describe only what the surviving
# fragments show.
28 def build_arguments_parser():
29 parser = ArgumentParser(description='NN Validation on Caffe and IE', allow_abbrev=False)
# -d/--definitions: shared definitions yml.
31 '-d', '--definitions',
32 help='path to the yml file with definitions',
# (option-name line missing) local configuration yml.
38 help='path to the yml file with local configuration',
# (option-name line missing) models/weights path prefix; get_path with
# is_directory=True validates the argument as an existing directory.
44 help='prefix path to the models and weights',
45 type=partial(get_path, is_directory=True),
# (option-name line missing) data-source path prefix.
51 help='prefix path to the data source',
52 type=partial(get_path, is_directory=True),
# -a/--annotations: converted annotations / dataset meta prefix.
57 '-a', '--annotations',
58 help='prefix path to the converted annotations and datasets meta data',
59 type=partial(get_path, is_directory=True),
# (option-name line missing) extensions folder prefix.
65 help='prefix path to extensions folder',
66 type=partial(get_path, is_directory=True),
# --cpu_extensions_mode: restricted to the two listed instruction sets.
71 '--cpu_extensions_mode',
72 help='specified preferable set of processor instruction for automatic searching cpu extension lib',
74 choices=['avx2', 'sse4']
# (option-name line missing) bitstreams folder prefix.
78 help='prefix path to bitstreams folder',
79 type=partial(get_path, is_directory=True),
# --stored_predictions: intentionally NOT validated with get_path (see the
# original comment below) because the file may not exist yet on first run.
84 '--stored_predictions',
85 help='path to file with saved predictions. Used for development',
86 # since at the first time file does not exist and then created we can not always check existence
# -C/--converted_models: output directory for Model Optimizer conversions.
90 '-C', '--converted_models',
91 help='directory to store Model Optimizer converted models. Used for DLSDK launcher only',
92 type=partial(get_path, is_directory=True),
# -M/--model_optimizer: no default on purpose (see original comment).
97 '-M', '--model_optimizer',
98 help='path to model optimizer caffe directory',
99 type=partial(get_path, is_directory=True),
100 # there is no default value because if user did not specify it we use specific locations
101 # defined in model_conversion.py
# TensorFlow custom-op configs for model optimizer; no default on purpose.
105 '--tf_custom_op_config_dir',
106 help='path to directory with tensorflow custom operation configuration files for model optimizer',
107 type=partial(get_path, is_directory=True),
108 # there is no default value because if user did not specify it we use specific location
109 # defined in model_conversion.py
# TF object-detection-API pipeline configs; no default on purpose.
113 '--tf_obj_detection_api_pipeline_config_path',
114 help='path to directory with tensorflow object detection api pipeline configuration files for model optimizer',
115 type=partial(get_path, is_directory=True),
116 # there is no default value because if user did not specify it we use specific location
117 # defined in model_conversion.py
# (option-name line missing) progress reporter selection; main() parses an
# optional ':'-separated suffix off this value.
122 help='progress reporter',
# Target selection: framework, space-separated device list, launcher tags.
127 '-tf', '--target_framework',
128 help='framework for infer',
132 '-td', '--target_devices',
133 help='Space separated list of devices for infer',
139 '-tt', '--target_tags',
140 help='Space separated list of launcher tags for infer',
# (option-name line missing) log file, consumed by add_file_handler in main().
147 help='file for additional logging results',
# --ignore_result_formatting: presumably a boolean flag — TODO confirm; the
# store_true/default lines are not visible in this extraction.
152 '--ignore_result_formatting',
153 help='allow to get raw metrics results without data formatting',
# -am/--affinity_map: affinity-maps path prefix.
159 '-am', '--affinity_map',
160 help='prefix path to the affinity maps',
161 type=partial(get_path, is_directory=True),
# (option-name line missing) aocl executable for FPGA bitstream programming.
# NOTE(review): the `return parser` statement is also missing from this view.
168 help='aocl executable path for FPGA bitstream programming',
# NOTE(review): this is the interior of the program entry point — the
# `def main():` header line (and the original indentation) was dropped by the
# extraction, as were the closing parentheses of the two multi-line calls
# below and the first argument line (presumably the model name — TODO confirm)
# of the print_processing_info(...) call. Code kept byte-identical.
#
# Flow: parse CLI args -> pick a progress reporter -> optionally attach a log
# file handler -> merge configs -> evaluate every (model, launcher, dataset)
# combination, reporting progress and metrics, releasing each evaluator after
# use.
177 args = build_arguments_parser().parse_args()
# The --progress value may carry a ':'-separated suffix; only the part before
# the first ':' names the reporter implementation.
178 progress_reporter = ProgressReporter.provide((
179 args.progress if ':' not in args.progress
180 else args.progress.split(':')[0]
# Attach file logging (presumably guarded by `if args.log_file:` on a dropped
# line — TODO confirm against the original source).
183 add_file_handler(args.log_file)
# Merge command-line arguments with the yml configuration files.
185 config = ConfigReader.merge(args)
# Evaluate every launcher/dataset combination declared for every model.
187 for model in config['models']:
188 for launcher_config in model['launchers']:
189 for dataset_config in model['datasets']:
190 print_processing_info(
192 launcher_config['framework'],
193 launcher_config['device'],
# tags may be absent from the launcher config — .get() yields None then.
194 launcher_config.get('tags'),
195 dataset_config['name']
197 model_evaluator = ModelEvaluator.from_configs(launcher_config, dataset_config)
# Reset the shared reporter to the size of this evaluator's dataset.
198 progress_reporter.reset(len(model_evaluator.dataset))
199 model_evaluator.process_dataset(args.stored_predictions, progress_reporter=progress_reporter)
200 model_evaluator.compute_metrics(ignore_results_formatting=args.ignore_result_formatting)
# Free launcher/dataset resources before moving to the next combination.
202 model_evaluator.release()
def print_processing_info(model, launcher, device, tags, dataset):
    """Log a human-readable summary of the evaluation combination.

    Args:
        model: model name being processed.
        launcher: launcher framework name (e.g. the launcher config's
            'framework' value).
        device: target device for inference.
        tags: optional iterable of launcher tag strings, or None — callers
            pass ``launcher_config.get('tags')``, which yields None when the
            config has no tags.
        dataset: dataset name.
    """
    print_info('Processing info:')
    print_info('model: {}'.format(model))
    print_info('launcher: {}'.format(launcher))
    # Guard against tags being None/empty: ' '.join(None) raises TypeError,
    # and an empty tag line is noise anyway.
    if tags:
        print_info('launcher tags: {}'.format(' '.join(tags)))
    print_info('device: {}'.format(device))
    print_info('dataset: {}'.format(dataset))
215 if __name__ == '__main__':