1 """
2 Copyright (c) 2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """

import subprocess
from pathlib import Path
import os
import platform
import numpy as np
from cpuinfo import get_cpu_info
import openvino.inference_engine as ie

from ..config import ConfigError, NumberField, PathField, StringField, DictField, ListField, BoolField
from ..logging import warning
from ..utils import read_yaml, contains_all, extract_image_representations, get_path
from .launcher import Launcher, LauncherConfig
from .input_feeder import InputFeeder
from .model_conversion import convert_model
from ..logging import print_info

HETERO_KEYWORD = 'HETERO:'
FPGA_COMPILER_MODE_VAR = 'CL_CONTEXT_COMPILER_MODE_INTELFPGA'
DEVICE_REGEX = r"(?:^{hetero}(?P<devices>(?:{devices})(?:,(?:{devices}))*)$)|(?:^(?P<device>{devices})$)".format(
    hetero=HETERO_KEYWORD, devices="|".join(plugin for plugin in ie.known_plugins)
)
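# Illustrative device strings accepted by DEVICE_REGEX, assuming the named
# plugins are registered in ie.known_plugins (examples only): 'CPU', 'GPU',
# 'HETERO:FPGA,CPU'.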


class CPUExtensionPathField(PathField):
    def __init__(self, **kwargs):
        super().__init__(is_directory=False, **kwargs)

    def validate(self, entry, field_uri=None):
        if entry is None:
            return

        field_uri = field_uri or self.field_uri
        validation_entry = ''
        try:
            validation_entry = Path(entry)
        except TypeError:
            self.raise_error(entry, field_uri, "value is expected to be path-like")
        is_directory = False
        # a trailing 'AUTO' component means only the parent directory is validated here;
        # the concrete extension library is resolved later by DLSDKLauncher.get_cpu_extension
        if validation_entry.parts[-1] == 'AUTO':
            validation_entry = validation_entry.parent
            is_directory = True
        try:
            get_path(validation_entry, is_directory)
        except FileNotFoundError:
            self.raise_error(validation_entry, field_uri, "path does not exist")
        except NotADirectoryError:
            self.raise_error(validation_entry, field_uri, "path is not a directory")
        except IsADirectoryError:
            self.raise_error(validation_entry, field_uri, "path is a directory, regular file expected")


class DLSDKLauncherConfig(LauncherConfig):
    """
    Specifies configuration structure for DLSDK launcher.
    """

    device = StringField(regex=DEVICE_REGEX)
    model = PathField(optional=True)
    weights = PathField(optional=True)
    caffe_model = PathField(optional=True)
    caffe_weights = PathField(optional=True)
    mxnet_weights = PathField(optional=True)
    tf_model = PathField(optional=True)
    onnx_model = PathField(optional=True)
    kaldi_model = PathField(optional=True)
    cpu_extensions = CPUExtensionPathField(optional=True)
    gpu_extensions = PathField(optional=True)
    bitstream = PathField(optional=True)
    mo_params = DictField(optional=True)
    mo_flags = ListField(optional=True)
    outputs = ListField(optional=True)
    allow_reshape_input = BoolField(optional=True)
    affinity_map = PathField(optional=True)
    batch = NumberField(floats=False, min_value=1, optional=True)

    _models_prefix = PathField(is_directory=True, optional=True)
    _model_optimizer = PathField(optional=True, allow_none=True, is_directory=True)
    _tf_obj_detection_api_config_dir = PathField(optional=True, allow_none=True, is_directory=True)
    _tf_custom_op_config_dir = PathField(optional=True, allow_none=True, is_directory=True)
    _cpu_extensions_mode = StringField(optional=True, allow_none=True)
    _aocl = PathField(optional=True)
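    # A minimal illustrative launcher entry validated by this config class.
    # Field values below are hypothetical placeholders, not defaults:
    #
    #   device: HETERO:FPGA,CPU
    #   model: /models/example/model.xml
    #   weights: /models/example/model.bin
    #   cpu_extensions: AUTO
    #   batch: 1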

    def __init__(self, config_uri, **kwargs):
        super().__init__(config_uri, **kwargs)
        self.need_conversion = None

    def validate(self, entry, field_uri=None):
        """
        Validate that launcher entry meets all configuration structure requirements.

        Args:
            entry: launcher configuration file entry.
            field_uri: id of launcher entry.
        """

        dlsdk_model_options = ['model', 'weights']
        caffe_model_options = ['caffe_model', 'caffe_weights']
        mxnet_model_options = ['mxnet_weights']
        tf_model_options = ['tf_model']
        onnx_model_options = ['onnx_model']
        kaldi_model_options = ['kaldi_model']

        multiple_model_sources_err = (
            'Either model and weights, caffe_model and caffe_weights, '
            'mxnet_weights, tf_model, onnx_model or kaldi_model should be specified.'
        )
        sources = {
            'dlsdk': dlsdk_model_options,
            'caffe': caffe_model_options,
            'tf': tf_model_options,
            'mxnet': mxnet_model_options,
            'onnx': onnx_model_options,
            'kaldi': kaldi_model_options
        }

        specified = []
        for mo_source_option in sources:
            if contains_all(entry, sources[mo_source_option]):
                specified.append(mo_source_option)

        if not specified:
            raise ConfigError('{} None provided'.format(multiple_model_sources_err))
        if len(specified) > 1:
            raise ConfigError('{} Several provided'.format(multiple_model_sources_err))

        self._set_model_source(specified[0])
        super().validate(entry, field_uri)

    def _set_model_source(self, framework):
        self.need_conversion = framework != 'dlsdk'
        self.framework = framework
        self.fields['model'].optional = self.need_conversion
        self.fields['weights'].optional = self.need_conversion
        self.fields['caffe_model'].optional = framework != 'caffe'
        self.fields['caffe_weights'].optional = framework != 'caffe'
        self.fields['mxnet_weights'].optional = framework != 'mxnet'
        self.fields['tf_model'].optional = framework != 'tf'
        self.fields['onnx_model'].optional = framework != 'onnx'
        self.fields['kaldi_model'].optional = framework != 'kaldi'


class DLSDKLauncher(Launcher):
    """
    Class for model inference using the DLSDK framework.
    """

    __provider__ = 'dlsdk'

    def __init__(self, config_entry, adapter):
        super().__init__(config_entry, adapter)

        def fit_to_input(data, input_layer):
            shape_len = len(input_layer.shape)
            # 4D input layers expect NCHW order, so NHWC image data is transposed
            if shape_len == 4:
                return np.transpose(data, [0, 3, 1, 2])
            # for 2D input layers, 1D data is turned into a column vector
            if shape_len == 2:
                if len(np.shape(data)) == 1:
                    return np.transpose([data])
            return np.array(data)

        dlsdk_launcher_config = DLSDKLauncherConfig('DLSDK_Launcher')
        dlsdk_launcher_config.validate(self._config)

        self._device = self._config['device'].upper()
        self._set_variable = False
        self._prepare_bitstream_firmware(self._config)

        if dlsdk_launcher_config.need_conversion:
            self._model, self._weights = DLSDKLauncher.convert_model(self._config, dlsdk_launcher_config.framework)
        else:
            self._model = self._config['model']
            self._weights = self._config['weights']

        self._create_ie_plugin()
        self.network = ie.IENetwork(model=str(self._model), weights=str(self._weights))
        self.original_outputs = self.network.outputs
        outputs = self._config.get('outputs')
        if outputs:
            self.network.add_outputs(outputs)
        self.input_feeder = InputFeeder(
            self._config.get('inputs') or [],
            self.network.inputs,
            prepare_input_data=fit_to_input
        )
        self._batch = self._config.get('batch', self.network.batch_size)
        if self._batch != self.network.batch_size:
            self._set_batch_size(self._batch)
        affinity_map_path = self._config.get('affinity_map')
        if affinity_map_path and self._is_hetero():
            self._set_affinity(affinity_map_path)
        elif affinity_map_path:
            warning('affinity_map config is applicable only for HETERO device')
        self.exec_network = self.plugin.load(network=self.network)
        self.allow_reshape_input = self._config.get('allow_reshape_input', False)

    @property
    def inputs(self):
        """
        Returns:
            shapes of non-constant network inputs with the batch dimension omitted.
        """

        # drop the batch dimension (N); only non-constant inputs are reported
        return {k: v.shape[1:] for k, v in self.network.inputs.items() if k in self.input_feeder.non_constant_inputs}

    @property
    def batch(self):
        return self._batch

    def predict(self, identifiers, data_representation, *args, **kwargs):
        """
        Args:
            identifiers: list of input data identifiers.
            data_representation: list of input data representations, which contain preprocessed data and its metadata.
        Returns:
            output of model converted to appropriate representation.
        """
        _, metadata = extract_image_representations(data_representation)
        non_constant_inputs = self.input_feeder.fill_non_constant_inputs(data_representation)
        results = []
        for infer_inputs in non_constant_inputs:
            # collect actual input shapes; constant inputs keep their declared shapes
            input_shapes = {}
            do_reshape = False
            for input_blob in self.network.inputs:
                if input_blob in self.input_feeder.const_inputs:
                    input_shapes[input_blob] = self.network.inputs[input_blob].shape
                    continue

                data = infer_inputs[input_blob]
                input_shapes[input_blob] = data.shape
                if self.allow_reshape_input:
                    if tuple(self.network.inputs[input_blob].shape) != data.shape:
                        do_reshape = True

            # reshape the network when the data does not match the declared input shapes
            if do_reshape:
                self._reshape_input(input_shapes)

            for input_blob, data in infer_inputs.items():
                infer_inputs[input_blob] = self._align_data_shape(data, input_blob)

            network_inputs_data = {**infer_inputs, **self.input_feeder.const_inputs}

            benchmark = kwargs.get('benchmark')
            if benchmark:
                benchmark(network_inputs_data)

            # synchronous inference on the prepared inputs
            result = self.exec_network.infer(network_inputs_data)

            raw_outputs_callback = kwargs.get('output_callback')
            if raw_outputs_callback:
                raw_outputs_callback(result)

            results.append(result)

        if self.adapter:
            self.adapter.output_blob = self.adapter.output_blob or next(iter(self.original_outputs))
            results = self.adapter(results, identifiers, [self._provide_inputs_info_to_meta(meta) for meta in metadata])

        return results

    def _is_hetero(self):
        return self._device.startswith(HETERO_KEYWORD)

    def _devices_list(self):
        device = self._device
        if HETERO_KEYWORD in self._device:
            device = self._device[len(HETERO_KEYWORD):]

        return [platform_.upper().strip() for platform_ in device.split(',')]

    def _set_affinity(self, affinity_map_path):
        self.plugin.set_initial_affinity(self.network)
        layers = self.network.layers
        for layer, device in read_yaml(affinity_map_path).items():
            if layer not in layers:
                raise ConfigError('Layer \'{layer}\' is not present in network'.format(layer=layer))
            if device not in self._devices_list():
                raise ConfigError(
                    'Device \'{device}\' set for \'{layer}\' layer is not present in '
                    'provided configuration \'{configuration}\''.format(
                        device=device, layer=layer, configuration=self._device
                    )
                )
            layers[layer].affinity = device

    def _is_fpga(self):
        return 'FPGA' in self._devices_list()

    def _prepare_bitstream_firmware(self, config):
        if not self._is_fpga():
            return

        compiler_mode = os.environ.get(FPGA_COMPILER_MODE_VAR)
        if compiler_mode == '3':
            return

        bitstream = config.get('bitstream')
        if bitstream:
            print_info('programming bitstream: {}'.format(bitstream.name))
            aocl_executable = config.get('_aocl')
            if aocl_executable:
                subprocess.run([str(aocl_executable), 'program', 'acl0', str(bitstream)])
                os.environ[FPGA_COMPILER_MODE_VAR] = '3'
                self._set_variable = True
            else:
                aocx_variable = 'DLA_AOCX'
                previous_bitstream = os.environ.get(aocx_variable)
                if previous_bitstream == str(bitstream):
                    return
                os.environ[aocx_variable] = str(bitstream)
                if not os.environ.get(aocx_variable):
                    warning('Warning: {} has not been set'.format(aocx_variable))

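
    # get_cpu_extension resolves a cpu_extensions path whose last component is
    # 'AUTO' to a concrete library in the same directory: the default
    # cpu_extension library if present, otherwise an ISA-specific build chosen
    # from the host CPU flags (avx2, falling back to sse4). A hedged
    # illustration with hypothetical paths:
    #
    #   # assuming /opt/ie/lib contains libcpu_extension_avx2.so but no libcpu_extension.so,
    #   # on a Linux host with AVX2:
    #   DLSDKLauncher.get_cpu_extension(Path('/opt/ie/lib/AUTO'), None)
    #   # -> Path('/opt/ie/lib/libcpu_extension_avx2.so')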
    @staticmethod
    def get_cpu_extension(cpu_extensions, selection_mode):
        cpu_extensions_name = cpu_extensions.parts[-1]
        if cpu_extensions_name != 'AUTO':
            return cpu_extensions
        extensions_path = cpu_extensions.parent
        file_format = '{}.dll' if platform.system() == 'Windows' else 'lib{}.so'
        if not selection_mode:
            default_cpu_extension = file_format.format('cpu_extension')
            extension_list = list(extensions_path.glob(default_cpu_extension))

            if extension_list:
                return extension_list[0]

            cpu_info_flags = get_cpu_info()['flags']
            selection_mode = 'avx2' if 'avx2' in cpu_info_flags else 'sse4'
        extension_list = list(extensions_path.glob(file_format.format('cpu_extension_{}'.format(selection_mode))))

        if not extension_list:
            raise ConfigError('suitable CPU extension lib not found in {}'.format(extensions_path))

        return extension_list[0]

    @staticmethod
    def convert_model(config, framework='caffe'):
        config_model = config.get(framework + '_model', '')
        config_weights = config.get(framework + '_weights', '')

        mo_search_paths = []
        model_optimizer = config.get('_model_optimizer')
        if model_optimizer:
            mo_search_paths.append(model_optimizer)

        model_optimizer_directory_env = os.environ.get('MO_DIR')
        if model_optimizer_directory_env:
            mo_search_paths.append(model_optimizer_directory_env)

        return convert_model(
            Path(config_model).name.split('.')[0] or Path(config_weights).name.split('.')[0],
            config_model, config_weights, framework,
            mo_search_paths, config.get('mo_params'),
            config.get('mo_flags'),
            config.get('_tf_custom_op_config_dir'),
            config.get('_tf_obj_detection_api_pipeline_config_path')
        )

    def _reshape_input(self, shapes):
        self.network.reshape(shapes)
        del self.exec_network
        self._create_ie_plugin(log=False)
        self.exec_network = self.plugin.load(network=self.network)

    def _set_batch_size(self, batch_size):
        # in some cases we can not use explicit property for setting batch size, so we need to use reshape instead
        # save const inputs without changes
        const_inputs_shapes = {
            input_name: self.network.inputs[input_name].shape for input_name in self.input_feeder.const_inputs
        }
        new_non_const_input_shapes = {}
        for layer_name in self.input_feeder.non_constant_inputs:
            layer = self.network.inputs[layer_name]
            layer_shape = layer.shape
            ind_batch = layer.layout.find('N')
            if ind_batch != -1:
                layer_shape[ind_batch] = batch_size
            new_non_const_input_shapes[layer_name] = layer_shape

        self.network.reshape({**const_inputs_shapes, **new_non_const_input_shapes})

    def _align_data_shape(self, data, input_blob):
        input_shape = self.network.inputs[input_blob].shape

        # the batch dimension follows the data; extra channels beyond the network's
        # expectation are truncated before reshaping to the input blob shape
        if data.shape[0] != input_shape[0]:
            input_shape[0] = data.shape[0]
        if len(data.shape) > 1 and len(input_shape) > 1 and data.shape[1] != input_shape[1]:
            data = data[:, :input_shape[1]]

        return data.reshape(input_shape)

    def _create_ie_plugin(self, log=True):
        if hasattr(self, 'plugin'):
            del self.plugin
        self.plugin = ie.IEPlugin(self._device)
        if log:
            print_info('IE version: {}'.format(ie.get_version()))
            print_info('Loaded {} plugin version: {}'.format(self.plugin.device, self.plugin.version))

        cpu_extensions = self._config.get('cpu_extensions')
        if cpu_extensions and 'CPU' in self._device:
            selection_mode = self._config.get('_cpu_extensions_mode')
            cpu_extensions = DLSDKLauncher.get_cpu_extension(cpu_extensions, selection_mode)
            self.plugin.add_cpu_extension(str(cpu_extensions))
        if self._config.get('gpu_extensions') and 'GPU' in self._device:
            self.plugin.set_config({'CONFIG_FILE': str(self._config.get('gpu_extensions'))})

    def release(self):
        if self._set_variable:
            del os.environ[FPGA_COMPILER_MODE_VAR]
        del self.network
        del self.exec_network
        del self.plugin
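

# A rough usage sketch of this launcher (hedged; `config_entry` and `adapter`
# stand for objects produced elsewhere by the accuracy_checker pipeline and are
# not defined in this module):
#
#   launcher = DLSDKLauncher(config_entry, adapter)
#   predictions = launcher.predict(identifiers, data_representation)
#   launcher.release()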